Diffstat (limited to 'drivers/net/ethernet/intel')
45 files changed, 1608 insertions, 1677 deletions
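One pattern that recurs throughout the ice portion of this diff is the rewrite of the context-packing helpers (ice_write_byte/word/dword/qword becoming ice_pack_ctx_*): the mask is now built pre-shifted with GENMASK()/GENMASK_ULL(), the value is shifted first and masked afterwards, and the old special case for fields exactly 32 or 64 bits wide, which existed because BIT(width) - 1 would otherwise shift by the full register width, goes away. The standalone C sketch below is illustration only and is not part of the commit; pack_field() and the local GENMASK_ULL() definition are hypothetical stand-ins for the kernel macro from <linux/bits.h>.

/*
 * Standalone sketch (not part of the commit): mimics the new
 * ice_pack_ctx_*() ordering -- build the already-shifted mask,
 * shift the value, then mask it.
 */
#include <stdint.h>
#include <stdio.h>

/* bits h..l set, valid for 0 <= l <= h <= 63 */
#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

/* Shift @val into position within its target byte and mask it. */
static uint64_t pack_field(uint64_t val, unsigned int shift_width,
			   unsigned int width)
{
	uint64_t mask = GENMASK_ULL(width - 1 + shift_width, shift_width);

	/* shift first, then mask: no BIT(64) needed for a full-width field */
	return (val << shift_width) & mask;
}

int main(void)
{
	/* 13-bit field starting 3 bits into its byte, all ones: prints 0xfff8 */
	printf("0x%llx\n", (unsigned long long)pack_field(0x1fff, 3, 13));

	/* full 64-bit field at shift 0: the case the old code special-cased */
	printf("0x%llx\n", (unsigned long long)pack_field(~0ULL, 0, 64));
	return 0;
}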
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 767358b60507..639fbb12bd35 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -372,6 +372,7 @@ config IGC config IGC_LEDS def_bool LEDS_TRIGGER_NETDEV depends on IGC && LEDS_CLASS + depends on LEDS_CLASS=y || IGC=m help Optional support for controlling the NIC LED's with the netdev LED trigger. diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 01f0f12035ca..3fcb8daaa243 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -171,8 +171,8 @@ static int debug = 3; static int eeprom_bad_csum_allow = 0; static int use_io = 0; module_param(debug, int, 0); -module_param(eeprom_bad_csum_allow, int, 0); -module_param(use_io, int, 0); +module_param(eeprom_bad_csum_allow, int, 0444); +module_param(use_io, int, 0444); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums"); MODULE_PARM_DESC(use_io, "Force use of i/o access mode"); diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index a2788fd5f8bb..19e450a5bd31 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -2559,7 +2559,7 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw) hw->phy.ops.write_reg_page(hw, BM_RAR_H(i), (u16)(mac_reg & 0xFFFF)); hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i), - FIELD_GET(E1000_RAH_AV, mac_reg)); + (u16)((mac_reg & E1000_RAH_AV) >> 16)); } e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index af5d9d97a0d6..cc8c531ec3df 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -6688,14 +6688,14 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool runtime) if (adapter->hw.phy.type == e1000_phy_igp_3) { e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw); } else if (hw->mac.type >= e1000_pch_lpt) { - if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) + if (wufc && !(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC))) { /* ULP does not support wake from unicast, multicast * or broadcast. 
*/ retval = e1000_enable_ulp_lpt_lp(hw, !runtime); - - if (retval) - return retval; + if (retval) + return retval; + } } /* Ensure that the appropriate bits are set in LPI_CTRL diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index f12092cdb1f0..f86578857e8a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -13208,12 +13208,12 @@ static netdev_features_t i40e_features_check(struct sk_buff *skb, features &= ~NETIF_F_GSO_MASK; /* MACLEN can support at most 63 words */ - len = skb_network_header(skb) - skb->data; + len = skb_network_offset(skb); if (len & ~(63 * 2)) goto out_err; /* IPLEN and EIPLEN can support at most 127 dwords */ - len = skb_transport_header(skb) - skb_network_header(skb); + len = skb_network_header_len(skb); if (len & ~(127 * 4)) goto out_err; @@ -13523,9 +13523,9 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair) return err; i40e_queue_pair_disable_irq(vsi, queue_pair); + i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */); err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */); i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); - i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */); i40e_queue_pair_clean_rings(vsi, queue_pair); i40e_queue_pair_reset_stats(vsi, queue_pair); diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index af4269330581..ce1f11b8ad65 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -567,8 +567,7 @@ static inline bool i40e_is_fw_ver_lt(struct i40e_hw *hw, u16 maj, u16 min) **/ static inline bool i40e_is_fw_ver_eq(struct i40e_hw *hw, u16 maj, u16 min) { - return (hw->aq.fw_maj_ver > maj || - (hw->aq.fw_maj_ver == maj && hw->aq.fw_min_ver == min)); + return (hw->aq.fw_maj_ver == maj && hw->aq.fw_min_ver == min); } #endif /* _I40E_PROTOTYPE_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 335fd13e86f7..aefec6bd3b67 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -4423,12 +4423,12 @@ static netdev_features_t iavf_features_check(struct sk_buff *skb, features &= ~NETIF_F_GSO_MASK; /* MACLEN can support at most 63 words */ - len = skb_network_header(skb) - skb->data; + len = skb_network_offset(skb); if (len & ~(63 * 2)) goto out_err; /* IPLEN and EIPLEN can support at most 127 dwords */ - len = skb_transport_header(skb) - skb_network_header(skb); + len = skb_network_header_len(skb); if (len & ~(127 * 4)) goto out_err; diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index ad953208f582..d2fd315556a3 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -190,15 +190,13 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx) q_vector = vsi->q_vectors[v_idx]; ice_for_each_tx_ring(tx_ring, q_vector->tx) { - if (vsi->netdev) - netif_queue_set_napi(vsi->netdev, tx_ring->q_index, - NETDEV_QUEUE_TYPE_TX, NULL); + ice_queue_set_napi(vsi, tx_ring->q_index, NETDEV_QUEUE_TYPE_TX, + NULL); tx_ring->q_vector = NULL; } ice_for_each_rx_ring(rx_ring, q_vector->rx) { - if (vsi->netdev) - netif_queue_set_napi(vsi->netdev, rx_ring->q_index, - NETDEV_QUEUE_TYPE_RX, NULL); + ice_queue_set_napi(vsi, rx_ring->q_index, NETDEV_QUEUE_TYPE_RX, + NULL); 
rx_ring->q_vector = NULL; } diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 9266f25a9978..4d8111aeb0ff 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -1002,9 +1002,9 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw) */ int ice_init_hw(struct ice_hw *hw) { - struct ice_aqc_get_phy_caps_data *pcaps; + struct ice_aqc_get_phy_caps_data *pcaps __free(kfree); + void *mac_buf __free(kfree); u16 mac_buf_len; - void *mac_buf; int status; /* Set MAC type based on DeviceID */ @@ -1082,7 +1082,7 @@ int ice_init_hw(struct ice_hw *hw) if (status) goto err_unroll_sched; - pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); if (!pcaps) { status = -ENOMEM; goto err_unroll_sched; @@ -1092,7 +1092,6 @@ int ice_init_hw(struct ice_hw *hw) status = ice_aq_get_phy_caps(hw->port_info, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL); - devm_kfree(ice_hw_to_dev(hw), pcaps); if (status) dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n", status); @@ -1119,18 +1118,15 @@ int ice_init_hw(struct ice_hw *hw) /* Get MAC information */ /* A single port can report up to two (LAN and WoL) addresses */ - mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2, - sizeof(struct ice_aqc_manage_mac_read_resp), - GFP_KERNEL); - mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp); - + mac_buf = kcalloc(2, sizeof(struct ice_aqc_manage_mac_read_resp), + GFP_KERNEL); if (!mac_buf) { status = -ENOMEM; goto err_unroll_fltr_mgmt_struct; } + mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp); status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL); - devm_kfree(ice_hw_to_dev(hw), mac_buf); if (status) goto err_unroll_fltr_mgmt_struct; @@ -1399,9 +1395,8 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = { * it to HW register space and enables the hardware to prefetch descriptors * instead of only fetching them on demand */ -int -ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, - u32 rxq_index) +int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, + u32 rxq_index) { u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 }; @@ -3277,19 +3272,14 @@ int ice_update_link_info(struct ice_port_info *pi) return status; if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) { - struct ice_aqc_get_phy_caps_data *pcaps; - struct ice_hw *hw; + struct ice_aqc_get_phy_caps_data *pcaps __free(kfree); - hw = pi->hw; - pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), - GFP_KERNEL); + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); if (!pcaps) return -ENOMEM; status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps, NULL); - - devm_kfree(ice_hw_to_dev(hw), pcaps); } return status; @@ -3430,8 +3420,8 @@ ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, int ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) { + struct ice_aqc_get_phy_caps_data *pcaps __free(kfree); struct ice_aqc_set_phy_cfg_data cfg = { 0 }; - struct ice_aqc_get_phy_caps_data *pcaps; struct ice_hw *hw; int status; @@ -3441,7 +3431,7 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) *aq_failures = 0; hw = pi->hw; - pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL); + pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); if (!pcaps) return -ENOMEM; @@ -3493,7 +3483,6 @@ ice_set_fc(struct ice_port_info *pi, 
u8 *aq_failures, bool ena_auto_link_update) } out: - devm_kfree(ice_hw_to_dev(hw), pcaps); return status; } @@ -3572,7 +3561,7 @@ int ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec) { - struct ice_aqc_get_phy_caps_data *pcaps; + struct ice_aqc_get_phy_caps_data *pcaps __free(kfree); struct ice_hw *hw; int status; @@ -3641,8 +3630,6 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, } out: - kfree(pcaps); - return status; } @@ -4362,13 +4349,13 @@ ice_aq_add_rdma_qsets(struct ice_hw *hw, u8 num_qset_grps, /* End of FW Admin Queue command wrappers */ /** - * ice_write_byte - write a byte to a packed context structure - * @src_ctx: the context structure to read from - * @dest_ctx: the context to be written to - * @ce_info: a description of the struct to be filled + * ice_pack_ctx_byte - write a byte to a packed context structure + * @src_ctx: unpacked source context structure + * @dest_ctx: packed destination context data + * @ce_info: context element description */ -static void -ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) +static void ice_pack_ctx_byte(u8 *src_ctx, u8 *dest_ctx, + const struct ice_ctx_ele *ce_info) { u8 src_byte, dest_byte, mask; u8 *from, *dest; @@ -4379,14 +4366,11 @@ ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; - mask = (u8)(BIT(ce_info->width) - 1); + mask = GENMASK(ce_info->width - 1 + shift_width, shift_width); src_byte = *from; - src_byte &= mask; - - /* shift to correct alignment */ - mask <<= shift_width; src_byte <<= shift_width; + src_byte &= mask; /* get the current bits from the target bit string */ dest = dest_ctx + (ce_info->lsb / 8); @@ -4401,13 +4385,13 @@ ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) } /** - * ice_write_word - write a word to a packed context structure - * @src_ctx: the context structure to read from - * @dest_ctx: the context to be written to - * @ce_info: a description of the struct to be filled + * ice_pack_ctx_word - write a word to a packed context structure + * @src_ctx: unpacked source context structure + * @dest_ctx: packed destination context data + * @ce_info: context element description */ -static void -ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) +static void ice_pack_ctx_word(u8 *src_ctx, u8 *dest_ctx, + const struct ice_ctx_ele *ce_info) { u16 src_word, mask; __le16 dest_word; @@ -4419,17 +4403,14 @@ ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; - mask = BIT(ce_info->width) - 1; + mask = GENMASK(ce_info->width - 1 + shift_width, shift_width); /* don't swizzle the bits until after the mask because the mask bits * will be in a different bit position on big endian machines */ src_word = *(u16 *)from; - src_word &= mask; - - /* shift to correct alignment */ - mask <<= shift_width; src_word <<= shift_width; + src_word &= mask; /* get the current bits from the target bit string */ dest = dest_ctx + (ce_info->lsb / 8); @@ -4444,13 +4425,13 @@ ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) } /** - * ice_write_dword - write a dword to a packed context structure - * @src_ctx: the context structure to read from - * @dest_ctx: the context to be written to - * @ce_info: a description of the struct to be filled + * ice_pack_ctx_dword - write a 
dword to a packed context structure + * @src_ctx: unpacked source context structure + * @dest_ctx: packed destination context data + * @ce_info: context element description */ -static void -ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) +static void ice_pack_ctx_dword(u8 *src_ctx, u8 *dest_ctx, + const struct ice_ctx_ele *ce_info) { u32 src_dword, mask; __le32 dest_dword; @@ -4462,25 +4443,14 @@ ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; - - /* if the field width is exactly 32 on an x86 machine, then the shift - * operation will not work because the SHL instructions count is masked - * to 5 bits so the shift will do nothing - */ - if (ce_info->width < 32) - mask = BIT(ce_info->width) - 1; - else - mask = (u32)~0; + mask = GENMASK(ce_info->width - 1 + shift_width, shift_width); /* don't swizzle the bits until after the mask because the mask bits * will be in a different bit position on big endian machines */ src_dword = *(u32 *)from; - src_dword &= mask; - - /* shift to correct alignment */ - mask <<= shift_width; src_dword <<= shift_width; + src_dword &= mask; /* get the current bits from the target bit string */ dest = dest_ctx + (ce_info->lsb / 8); @@ -4495,13 +4465,13 @@ ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) } /** - * ice_write_qword - write a qword to a packed context structure - * @src_ctx: the context structure to read from - * @dest_ctx: the context to be written to - * @ce_info: a description of the struct to be filled + * ice_pack_ctx_qword - write a qword to a packed context structure + * @src_ctx: unpacked source context structure + * @dest_ctx: packed destination context data + * @ce_info: context element description */ -static void -ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) +static void ice_pack_ctx_qword(u8 *src_ctx, u8 *dest_ctx, + const struct ice_ctx_ele *ce_info) { u64 src_qword, mask; __le64 dest_qword; @@ -4513,25 +4483,14 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) /* prepare the bits and mask */ shift_width = ce_info->lsb % 8; - - /* if the field width is exactly 64 on an x86 machine, then the shift - * operation will not work because the SHL instructions count is masked - * to 6 bits so the shift will do nothing - */ - if (ce_info->width < 64) - mask = BIT_ULL(ce_info->width) - 1; - else - mask = (u64)~0; + mask = GENMASK_ULL(ce_info->width - 1 + shift_width, shift_width); /* don't swizzle the bits until after the mask because the mask bits * will be in a different bit position on big endian machines */ src_qword = *(u64 *)from; - src_qword &= mask; - - /* shift to correct alignment */ - mask <<= shift_width; src_qword <<= shift_width; + src_qword &= mask; /* get the current bits from the target bit string */ dest = dest_ctx + (ce_info->lsb / 8); @@ -4550,11 +4509,10 @@ ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info) * @hw: pointer to the hardware structure * @src_ctx: pointer to a generic non-packed context structure * @dest_ctx: pointer to memory for the packed structure - * @ce_info: a description of the structure to be transformed + * @ce_info: List of Rx context elements */ -int -ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info) +int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, + const struct ice_ctx_ele *ce_info) { int f; @@ -4570,16 
+4528,16 @@ ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, } switch (ce_info[f].size_of) { case sizeof(u8): - ice_write_byte(src_ctx, dest_ctx, &ce_info[f]); + ice_pack_ctx_byte(src_ctx, dest_ctx, &ce_info[f]); break; case sizeof(u16): - ice_write_word(src_ctx, dest_ctx, &ce_info[f]); + ice_pack_ctx_word(src_ctx, dest_ctx, &ce_info[f]); break; case sizeof(u32): - ice_write_dword(src_ctx, dest_ctx, &ce_info[f]); + ice_pack_ctx_dword(src_ctx, dest_ctx, &ce_info[f]); break; case sizeof(u64): - ice_write_qword(src_ctx, dest_ctx, &ce_info[f]); + ice_pack_ctx_qword(src_ctx, dest_ctx, &ce_info[f]); break; default: return -EINVAL; diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index 32fd10de620c..ffb22c7ce28b 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -53,9 +53,8 @@ int ice_get_caps(struct ice_hw *hw); void ice_set_safe_mode_caps(struct ice_hw *hw); -int -ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, - u32 rxq_index); +int ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx, + u32 rxq_index); int ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params); @@ -72,9 +71,8 @@ bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq); int ice_aq_q_shutdown(struct ice_hw *hw, bool unloading); void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode); extern const struct ice_ctx_ele ice_tlan_ctx_info[]; -int -ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, - const struct ice_ctx_ele *ce_info); +int ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx, + const struct ice_ctx_ele *ce_info); extern struct mutex ice_global_cfg_lock_sw; diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c index c0256564e998..e92be6f130a3 100644 --- a/drivers/net/ethernet/intel/ice/ice_dpll.c +++ b/drivers/net/ethernet/intel/ice/ice_dpll.c @@ -31,6 +31,26 @@ static const char * const pin_type_name[] = { }; /** + * ice_dpll_is_reset - check if reset is in progress + * @pf: private board structure + * @extack: error reporting + * + * If reset is in progress, fill extack with error. 
+ * + * Return: + * * false - no reset in progress + * * true - reset in progress + */ +static bool ice_dpll_is_reset(struct ice_pf *pf, struct netlink_ext_ack *extack) +{ + if (ice_is_reset_in_progress(pf->state)) { + NL_SET_ERR_MSG(extack, "PF reset in progress"); + return true; + } + return false; +} + +/** * ice_dpll_pin_freq_set - set pin's frequency * @pf: private board structure * @pin: pointer to a pin @@ -109,6 +129,9 @@ ice_dpll_frequency_set(const struct dpll_pin *pin, void *pin_priv, struct ice_pf *pf = d->pf; int ret; + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); ret = ice_dpll_pin_freq_set(pf, p, pin_type, frequency, extack); mutex_unlock(&pf->dplls.lock); @@ -254,6 +277,7 @@ ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv, * ice_dpll_pin_enable - enable a pin on dplls * @hw: board private hw structure * @pin: pointer to a pin + * @dpll_idx: dpll index to connect to output pin * @pin_type: type of pin being enabled * @extack: error reporting * @@ -266,7 +290,7 @@ ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv, */ static int ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin, - enum ice_dpll_pin_type pin_type, + u8 dpll_idx, enum ice_dpll_pin_type pin_type, struct netlink_ext_ack *extack) { u8 flags = 0; @@ -280,10 +304,12 @@ ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin, ret = ice_aq_set_input_pin_cfg(hw, pin->idx, 0, flags, 0, 0); break; case ICE_DPLL_PIN_TYPE_OUTPUT: + flags = ICE_AQC_SET_CGU_OUT_CFG_UPDATE_SRC_SEL; if (pin->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN) flags |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN; flags |= ICE_AQC_SET_CGU_OUT_CFG_OUT_EN; - ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, 0, 0, 0); + ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, dpll_idx, + 0, 0); break; default: return -EINVAL; @@ -370,7 +396,7 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin, case ICE_DPLL_PIN_TYPE_INPUT: ret = ice_aq_get_input_pin_cfg(&pf->hw, pin->idx, NULL, NULL, NULL, &pin->flags[0], - &pin->freq, NULL); + &pin->freq, &pin->phase_adjust); if (ret) goto err; if (ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN & pin->flags[0]) { @@ -398,14 +424,27 @@ ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin, break; case ICE_DPLL_PIN_TYPE_OUTPUT: ret = ice_aq_get_output_pin_cfg(&pf->hw, pin->idx, - &pin->flags[0], NULL, + &pin->flags[0], &parent, &pin->freq, NULL); if (ret) goto err; - if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0]) - pin->state[0] = DPLL_PIN_STATE_CONNECTED; - else - pin->state[0] = DPLL_PIN_STATE_DISCONNECTED; + + parent &= ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL; + if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0]) { + pin->state[pf->dplls.eec.dpll_idx] = + parent == pf->dplls.eec.dpll_idx ? + DPLL_PIN_STATE_CONNECTED : + DPLL_PIN_STATE_DISCONNECTED; + pin->state[pf->dplls.pps.dpll_idx] = + parent == pf->dplls.pps.dpll_idx ? 
+ DPLL_PIN_STATE_CONNECTED : + DPLL_PIN_STATE_DISCONNECTED; + } else { + pin->state[pf->dplls.eec.dpll_idx] = + DPLL_PIN_STATE_DISCONNECTED; + pin->state[pf->dplls.pps.dpll_idx] = + DPLL_PIN_STATE_DISCONNECTED; + } break; case ICE_DPLL_PIN_TYPE_RCLK_INPUT: for (parent = 0; parent < pf->dplls.rclk.num_parents; @@ -570,9 +609,13 @@ ice_dpll_pin_state_set(const struct dpll_pin *pin, void *pin_priv, struct ice_pf *pf = d->pf; int ret; + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); if (enable) - ret = ice_dpll_pin_enable(&pf->hw, p, pin_type, extack); + ret = ice_dpll_pin_enable(&pf->hw, p, d->dpll_idx, pin_type, + extack); else ret = ice_dpll_pin_disable(&pf->hw, p, pin_type, extack); if (!ret) @@ -605,6 +648,11 @@ ice_dpll_output_state_set(const struct dpll_pin *pin, void *pin_priv, struct netlink_ext_ack *extack) { bool enable = state == DPLL_PIN_STATE_CONNECTED; + struct ice_dpll_pin *p = pin_priv; + struct ice_dpll *d = dpll_priv; + + if (!enable && p->state[d->dpll_idx] == DPLL_PIN_STATE_DISCONNECTED) + return 0; return ice_dpll_pin_state_set(pin, pin_priv, dpll, dpll_priv, enable, extack, ICE_DPLL_PIN_TYPE_OUTPUT); @@ -667,14 +715,16 @@ ice_dpll_pin_state_get(const struct dpll_pin *pin, void *pin_priv, struct ice_pf *pf = d->pf; int ret; + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); ret = ice_dpll_pin_state_update(pf, p, pin_type, extack); if (ret) goto unlock; - if (pin_type == ICE_DPLL_PIN_TYPE_INPUT) + if (pin_type == ICE_DPLL_PIN_TYPE_INPUT || + pin_type == ICE_DPLL_PIN_TYPE_OUTPUT) *state = p->state[d->dpll_idx]; - else if (pin_type == ICE_DPLL_PIN_TYPE_OUTPUT) - *state = p->state[0]; ret = 0; unlock: mutex_unlock(&pf->dplls.lock); @@ -792,6 +842,9 @@ ice_dpll_input_prio_set(const struct dpll_pin *pin, void *pin_priv, struct ice_pf *pf = d->pf; int ret; + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); ret = ice_dpll_hw_input_prio_set(pf, d, p, prio, extack); mutex_unlock(&pf->dplls.lock); @@ -912,6 +965,9 @@ ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv, u8 flag, flags_en = 0; int ret; + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); switch (type) { case ICE_DPLL_PIN_TYPE_INPUT: @@ -1071,6 +1127,9 @@ ice_dpll_rclk_state_on_pin_set(const struct dpll_pin *pin, void *pin_priv, int ret = -EINVAL; u32 hw_idx; + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); hw_idx = parent->idx - pf->dplls.base_rclk_idx; if (hw_idx >= pf->dplls.num_inputs) @@ -1125,6 +1184,9 @@ ice_dpll_rclk_state_on_pin_get(const struct dpll_pin *pin, void *pin_priv, int ret = -EINVAL; u32 hw_idx; + if (ice_dpll_is_reset(pf, extack)) + return -EBUSY; + mutex_lock(&pf->dplls.lock); hw_idx = parent->idx - pf->dplls.base_rclk_idx; if (hw_idx >= pf->dplls.num_inputs) @@ -1307,8 +1369,10 @@ static void ice_dpll_periodic_work(struct kthread_work *work) struct ice_pf *pf = container_of(d, struct ice_pf, dplls); struct ice_dpll *de = &pf->dplls.eec; struct ice_dpll *dp = &pf->dplls.pps; - int ret; + int ret = 0; + if (ice_is_reset_in_progress(pf->state)) + goto resched; mutex_lock(&pf->dplls.lock); ret = ice_dpll_update_state(pf, de, false); if (!ret) @@ -1328,6 +1392,7 @@ static void ice_dpll_periodic_work(struct kthread_work *work) ice_dpll_notify_changes(de); ice_dpll_notify_changes(dp); +resched: /* Run twice a second or reschedule if update failed */ kthread_queue_delayed_work(d->kworker, &d->work, ret ? 
msecs_to_jiffies(10) : @@ -1534,7 +1599,7 @@ static void ice_dpll_deinit_rclk_pin(struct ice_pf *pf) } if (WARN_ON_ONCE(!vsi || !vsi->netdev)) return; - netdev_dpll_pin_clear(vsi->netdev); + dpll_netdev_pin_clear(vsi->netdev); dpll_pin_put(rclk->pin); } @@ -1578,7 +1643,7 @@ ice_dpll_init_rclk_pins(struct ice_pf *pf, struct ice_dpll_pin *pin, } if (WARN_ON((!vsi || !vsi->netdev))) return -EINVAL; - netdev_dpll_pin_set(vsi->netdev, pf->dplls.rclk.pin); + dpll_netdev_pin_set(vsi->netdev, pf->dplls.rclk.pin); return 0; @@ -2057,6 +2122,7 @@ void ice_dpll_init(struct ice_pf *pf) struct ice_dplls *d = &pf->dplls; int err = 0; + mutex_init(&d->lock); err = ice_dpll_init_info(pf, cgu); if (err) goto err_exit; @@ -2069,7 +2135,6 @@ void ice_dpll_init(struct ice_pf *pf) err = ice_dpll_init_pins(pf, cgu); if (err) goto deinit_pps; - mutex_init(&d->lock); if (cgu) { err = ice_dpll_init_worker(pf); if (err) diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 3cc364a4d682..bb912155676e 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -802,7 +802,7 @@ static int ice_lbtest_create_frame(struct ice_pf *pf, u8 **ret_data, u16 size) if (!pf) return -EINVAL; - data = devm_kzalloc(ice_pf_to_dev(pf), size, GFP_KERNEL); + data = kzalloc(size, GFP_KERNEL); if (!data) return -ENOMEM; @@ -945,11 +945,9 @@ static u64 ice_loopback_test(struct net_device *netdev) int num_frames, valid_frames; struct ice_tx_ring *tx_ring; struct ice_rx_ring *rx_ring; - struct device *dev; - u8 *tx_frame; + u8 *tx_frame __free(kfree); int i; - dev = ice_pf_to_dev(pf); netdev_info(netdev, "loopback test\n"); test_vsi = ice_lb_vsi_setup(pf, pf->hw.port_info); @@ -994,7 +992,7 @@ static u64 ice_loopback_test(struct net_device *netdev) for (i = 0; i < num_frames; i++) { if (ice_diag_send(tx_ring, tx_frame, ICE_LB_FRAME_SIZE)) { ret = 8; - goto lbtest_free_frame; + goto remove_mac_filters; } } @@ -1004,8 +1002,6 @@ static u64 ice_loopback_test(struct net_device *netdev) else if (valid_frames != num_frames) ret = 10; -lbtest_free_frame: - devm_kfree(dev, tx_frame); remove_mac_filters: if (ice_fltr_remove_mac(test_vsi, broadcast, ICE_FWD_TO_VSI)) netdev_err(netdev, "Could not remove MAC filter for the test VSI\n"); diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 60e0d824195e..71f061667980 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2297,7 +2297,7 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params) ice_vsi_map_rings_to_vectors(vsi); /* Associate q_vector rings to napi */ - ice_vsi_set_napi_queues(vsi, true); + ice_vsi_set_napi_queues(vsi); vsi->stat_offsets_loaded = false; @@ -2720,74 +2720,19 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked) } /** - * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI - * @vsi: the VSI being un-configured - */ -void ice_vsi_dis_irq(struct ice_vsi *vsi) -{ - struct ice_pf *pf = vsi->back; - struct ice_hw *hw = &pf->hw; - u32 val; - int i; - - /* disable interrupt causation from each queue */ - if (vsi->tx_rings) { - ice_for_each_txq(vsi, i) { - if (vsi->tx_rings[i]) { - u16 reg; - - reg = vsi->tx_rings[i]->reg_idx; - val = rd32(hw, QINT_TQCTL(reg)); - val &= ~QINT_TQCTL_CAUSE_ENA_M; - wr32(hw, QINT_TQCTL(reg), val); - } - } - } - - if (vsi->rx_rings) { - ice_for_each_rxq(vsi, i) { - if (vsi->rx_rings[i]) { - u16 reg; - - reg = 
vsi->rx_rings[i]->reg_idx; - val = rd32(hw, QINT_RQCTL(reg)); - val &= ~QINT_RQCTL_CAUSE_ENA_M; - wr32(hw, QINT_RQCTL(reg), val); - } - } - } - - /* disable each interrupt */ - ice_for_each_q_vector(vsi, i) { - if (!vsi->q_vectors[i]) - continue; - wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0); - } - - ice_flush(hw); - - /* don't call synchronize_irq() for VF's from the host */ - if (vsi->type == ICE_VSI_VF) - return; - - ice_for_each_q_vector(vsi, i) - synchronize_irq(vsi->q_vectors[i]->irq.virq); -} - -/** - * ice_queue_set_napi - Set the napi instance for the queue + * __ice_queue_set_napi - Set the napi instance for the queue * @dev: device to which NAPI and queue belong * @queue_index: Index of queue * @type: queue type as RX or TX * @napi: NAPI context * @locked: is the rtnl_lock already held * - * Set the napi instance for the queue + * Set the napi instance for the queue. Caller indicates the lock status. */ static void -ice_queue_set_napi(struct net_device *dev, unsigned int queue_index, - enum netdev_queue_type type, struct napi_struct *napi, - bool locked) +__ice_queue_set_napi(struct net_device *dev, unsigned int queue_index, + enum netdev_queue_type type, struct napi_struct *napi, + bool locked) { if (!locked) rtnl_lock(); @@ -2797,26 +2742,79 @@ ice_queue_set_napi(struct net_device *dev, unsigned int queue_index, } /** - * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi + * ice_queue_set_napi - Set the napi instance for the queue + * @vsi: VSI being configured + * @queue_index: Index of queue + * @type: queue type as RX or TX + * @napi: NAPI context + * + * Set the napi instance for the queue. The rtnl lock state is derived from the + * execution path. + */ +void +ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index, + enum netdev_queue_type type, struct napi_struct *napi) +{ + struct ice_pf *pf = vsi->back; + + if (!vsi->netdev) + return; + + if (current_work() == &pf->serv_task || + test_bit(ICE_PREPARED_FOR_RESET, pf->state) || + test_bit(ICE_DOWN, pf->state) || + test_bit(ICE_SUSPENDED, pf->state)) + __ice_queue_set_napi(vsi->netdev, queue_index, type, napi, + false); + else + __ice_queue_set_napi(vsi->netdev, queue_index, type, napi, + true); +} + +/** + * __ice_q_vector_set_napi_queues - Map queue[s] associated with the napi * @q_vector: q_vector pointer * @locked: is the rtnl_lock already held * + * Associate the q_vector napi with all the queue[s] on the vector. + * Caller indicates the lock status. 
+ */ +void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked) +{ + struct ice_rx_ring *rx_ring; + struct ice_tx_ring *tx_ring; + + ice_for_each_rx_ring(rx_ring, q_vector->rx) + __ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index, + NETDEV_QUEUE_TYPE_RX, &q_vector->napi, + locked); + + ice_for_each_tx_ring(tx_ring, q_vector->tx) + __ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index, + NETDEV_QUEUE_TYPE_TX, &q_vector->napi, + locked); + /* Also set the interrupt number for the NAPI */ + netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq); +} + +/** + * ice_q_vector_set_napi_queues - Map queue[s] associated with the napi + * @q_vector: q_vector pointer + * * Associate the q_vector napi with all the queue[s] on the vector */ -void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked) +void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector) { struct ice_rx_ring *rx_ring; struct ice_tx_ring *tx_ring; ice_for_each_rx_ring(rx_ring, q_vector->rx) - ice_queue_set_napi(q_vector->vsi->netdev, rx_ring->q_index, - NETDEV_QUEUE_TYPE_RX, &q_vector->napi, - locked); + ice_queue_set_napi(q_vector->vsi, rx_ring->q_index, + NETDEV_QUEUE_TYPE_RX, &q_vector->napi); ice_for_each_tx_ring(tx_ring, q_vector->tx) - ice_queue_set_napi(q_vector->vsi->netdev, tx_ring->q_index, - NETDEV_QUEUE_TYPE_TX, &q_vector->napi, - locked); + ice_queue_set_napi(q_vector->vsi, tx_ring->q_index, + NETDEV_QUEUE_TYPE_TX, &q_vector->napi); /* Also set the interrupt number for the NAPI */ netif_napi_set_irq(&q_vector->napi, q_vector->irq.virq); } @@ -2824,11 +2822,10 @@ void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked) /** * ice_vsi_set_napi_queues * @vsi: VSI pointer - * @locked: is the rtnl_lock already held * * Associate queue[s] with napi for all vectors */ -void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked) +void ice_vsi_set_napi_queues(struct ice_vsi *vsi) { int i; @@ -2836,7 +2833,7 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked) return; ice_for_each_q_vector(vsi, i) - ice_q_vector_set_napi_queues(vsi->q_vectors[i], locked); + ice_q_vector_set_napi_queues(vsi->q_vectors[i]); } /** @@ -3011,7 +3008,7 @@ ice_vsi_realloc_stat_arrays(struct ice_vsi *vsi) } } - tx_ring_stats = vsi_stat->rx_ring_stats; + tx_ring_stats = vsi_stat->tx_ring_stats; vsi_stat->tx_ring_stats = krealloc_array(vsi_stat->tx_ring_stats, req_txq, sizeof(*vsi_stat->tx_ring_stats), diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index 0c77d581416a..9cd23afe5f15 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -81,9 +81,15 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc); struct ice_vsi * ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params); -void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked); +void +ice_queue_set_napi(struct ice_vsi *vsi, unsigned int queue_index, + enum netdev_queue_type type, struct napi_struct *napi); + +void __ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector, bool locked); -void ice_vsi_set_napi_queues(struct ice_vsi *vsi, bool locked); +void ice_q_vector_set_napi_queues(struct ice_q_vector *q_vector); + +void ice_vsi_set_napi_queues(struct ice_vsi *vsi); int ice_vsi_release(struct ice_vsi *vsi); @@ -104,8 +110,6 @@ void ice_write_qrxflxp_cntxt(struct ice_hw *hw, u16 pf_q, u32 rxdid, u32 prio, bool ena_ts); -void ice_vsi_dis_irq(struct ice_vsi *vsi); 
- void ice_vsi_free_irq(struct ice_vsi *vsi); void ice_vsi_free_rx_rings(struct ice_vsi *vsi); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 9c2c8637b4a7..5d7660bc8846 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3497,7 +3497,7 @@ static void ice_napi_add(struct ice_vsi *vsi) ice_for_each_q_vector(vsi, v_idx) { netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi, ice_napi_poll); - ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false); + __ice_q_vector_set_napi_queues(vsi->q_vectors[v_idx], false); } } @@ -5383,6 +5383,7 @@ static int ice_reinit_interrupt_scheme(struct ice_pf *pf) if (ret) goto err_reinit; ice_vsi_map_rings_to_vectors(pf->vsi[v]); + ice_vsi_set_napi_queues(pf->vsi[v]); } ret = ice_req_irq_msix_misc(pf); @@ -7001,6 +7002,50 @@ static void ice_napi_disable_all(struct ice_vsi *vsi) } /** + * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI + * @vsi: the VSI being un-configured + */ +static void ice_vsi_dis_irq(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + u32 val; + int i; + + /* disable interrupt causation from each Rx queue; Tx queues are + * handled in ice_vsi_stop_tx_ring() + */ + if (vsi->rx_rings) { + ice_for_each_rxq(vsi, i) { + if (vsi->rx_rings[i]) { + u16 reg; + + reg = vsi->rx_rings[i]->reg_idx; + val = rd32(hw, QINT_RQCTL(reg)); + val &= ~QINT_RQCTL_CAUSE_ENA_M; + wr32(hw, QINT_RQCTL(reg), val); + } + } + } + + /* disable each interrupt */ + ice_for_each_q_vector(vsi, i) { + if (!vsi->q_vectors[i]) + continue; + wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0); + } + + ice_flush(hw); + + /* don't call synchronize_irq() for VF's from the host */ + if (vsi->type == ICE_VSI_VF) + return; + + ice_for_each_q_vector(vsi, i) + synchronize_irq(vsi->q_vectors[i]->irq.virq); +} + +/** * ice_down - Shutdown the connection * @vsi: The VSI being stopped * @@ -7953,6 +7998,8 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, pf_sw = pf->first_sw; /* find the attribute in the netlink message */ br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); + if (!br_spec) + return -EINVAL; nla_for_each_nested(attr, br_spec, rem) { __u16 mode; diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index a94a1c48c3de..a958fcf3e6be 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -240,7 +240,6 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf) } vf->lan_vsi_idx = vsi->idx; - vf->lan_vsi_num = vsi->vsi_num; return vsi; } @@ -1068,6 +1067,7 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count) struct ice_pf *pf = pci_get_drvdata(pdev); u16 prev_msix, prev_queues, queues; bool needs_rebuild = false; + struct ice_vsi *vsi; struct ice_vf *vf; int id; @@ -1102,6 +1102,10 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count) if (!vf) return -ENOENT; + vsi = ice_get_vf_vsi(vf); + if (!vsi) + return -ENOENT; + prev_msix = vf->num_msix; prev_queues = vf->num_vf_qs; @@ -1122,7 +1126,7 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count) if (vf->first_vector_idx < 0) goto unroll; - if (ice_vf_reconfig_vsi(vf)) { + if (ice_vf_reconfig_vsi(vf) || ice_vf_init_host_cfg(vf, vsi)) { /* Try to rebuild with previous values */ needs_rebuild = true; goto unroll; @@ -1148,8 +1152,10 @@ unroll: 
if (vf->first_vector_idx < 0) return -EINVAL; - if (needs_rebuild) + if (needs_rebuild) { ice_vf_reconfig_vsi(vf); + ice_vf_init_host_cfg(vf, vsi); + } ice_ena_vf_mappings(vf); ice_put_vf(vf); diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index 2ffdae9a82df..21d26e19338a 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -280,12 +280,6 @@ int ice_vf_reconfig_vsi(struct ice_vf *vf) return err; } - /* Update the lan_vsi_num field since it might have been changed. The - * PF lan_vsi_idx number remains the same so we don't need to change - * that. - */ - vf->lan_vsi_num = vsi->vsi_num; - return 0; } @@ -315,7 +309,6 @@ static int ice_vf_rebuild_vsi(struct ice_vf *vf) * vf->lan_vsi_idx */ vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); - vf->lan_vsi_num = vsi->vsi_num; return 0; } @@ -1315,13 +1308,12 @@ int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi) } /** - * ice_vf_invalidate_vsi - invalidate vsi_idx/vsi_num to remove VSI access + * ice_vf_invalidate_vsi - invalidate vsi_idx to remove VSI access * @vf: VF to remove access to VSI for */ void ice_vf_invalidate_vsi(struct ice_vf *vf) { vf->lan_vsi_idx = ICE_NO_VSI; - vf->lan_vsi_num = ICE_NO_VSI; } /** diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h index 0cc9034065c5..fec16919ec19 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h @@ -109,11 +109,6 @@ struct ice_vf { u8 spoofchk:1; u8 link_forced:1; u8 link_up:1; /* only valid if VF link is forced */ - /* VSI indices - actual VSI pointers are maintained in the PF structure - * When assigned, these will be non-zero, because VSI 0 is always - * the main LAN VSI for the PF. 
- */ - u16 lan_vsi_num; /* ID as used by firmware */ unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */ unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */ DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index c925813ec9ca..1ff9818b4c84 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -440,7 +440,6 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) vf->driver_caps = *(u32 *)msg; else vf->driver_caps = VIRTCHNL_VF_OFFLOAD_L2 | - VIRTCHNL_VF_OFFLOAD_RSS_REG | VIRTCHNL_VF_OFFLOAD_VLAN; vfres->vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2; @@ -453,14 +452,8 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) vfres->vf_cap_flags |= ice_vc_get_vlan_caps(hw, vf, vsi, vf->driver_caps); - if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) { + if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_PF) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_PF; - } else { - if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RSS_AQ) - vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_AQ; - else - vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RSS_REG; - } if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC; @@ -506,7 +499,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg) vfres->rss_lut_size = ICE_LUT_VSI_SIZE; vfres->max_mtu = ice_vc_get_max_frame_size(vf); - vfres->vsi_res[0].vsi_id = vf->lan_vsi_num; + vfres->vsi_res[0].vsi_id = ICE_VF_VSI_ID; vfres->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV; vfres->vsi_res[0].num_queue_pairs = vsi->num_txq; ether_addr_copy(vfres->vsi_res[0].default_mac_addr, @@ -552,27 +545,20 @@ static void ice_vc_reset_vf_msg(struct ice_vf *vf) */ bool ice_vc_isvalid_vsi_id(struct ice_vf *vf, u16 vsi_id) { - struct ice_pf *pf = vf->pf; - struct ice_vsi *vsi; - - vsi = ice_find_vsi(pf, vsi_id); - - return (vsi && (vsi->vf == vf)); + return vsi_id == ICE_VF_VSI_ID; } /** * ice_vc_isvalid_q_id - * @vf: pointer to the VF info - * @vsi_id: VSI ID + * @vsi: VSI to check queue ID against * @qid: VSI relative queue ID * * check for the valid queue ID */ -static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid) +static bool ice_vc_isvalid_q_id(struct ice_vsi *vsi, u8 qid) { - struct ice_vsi *vsi = ice_find_vsi(vf->pf, vsi_id); /* allocated Tx and Rx queues should be always equal for VF VSI */ - return (vsi && (qid < vsi->alloc_txq)); + return qid < vsi->alloc_txq; } /** @@ -1330,7 +1316,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) */ q_map = vqs->rx_queues; for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) { - if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { + if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1352,7 +1338,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg) q_map = vqs->tx_queues; for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) { - if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { + if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1457,7 +1443,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) q_map = vqs->tx_queues; for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) { - if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { + if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto 
error_param; } @@ -1483,7 +1469,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg) bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF); } else if (q_map) { for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) { - if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) { + if (!ice_vc_isvalid_q_id(vsi, vf_q_id)) { v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1539,7 +1525,7 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id, for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) { vsi_q_id = vsi_q_id_idx; - if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id)) + if (!ice_vc_isvalid_q_id(vsi, vsi_q_id)) return VIRTCHNL_STATUS_ERR_PARAM; q_vector->num_ring_rx++; @@ -1553,7 +1539,7 @@ ice_cfg_interrupt(struct ice_vf *vf, struct ice_vsi *vsi, u16 vector_id, for_each_set_bit(vsi_q_id_idx, &qmap, ICE_MAX_RSS_QS_PER_VF) { vsi_q_id = vsi_q_id_idx; - if (!ice_vc_isvalid_q_id(vf, vsi->vsi_num, vsi_q_id)) + if (!ice_vc_isvalid_q_id(vsi, vsi_q_id)) return VIRTCHNL_STATUS_ERR_PARAM; q_vector->num_ring_tx++; @@ -1710,7 +1696,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) qpi->txq.headwb_enabled || !ice_vc_isvalid_ring_len(qpi->txq.ring_len) || !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) || - !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) { + !ice_vc_isvalid_q_id(vsi, qpi->txq.queue_id)) { goto error_param; } diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.h b/drivers/net/ethernet/intel/ice/ice_virtchnl.h index 60dfbe05980a..3a4115869153 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.h @@ -19,6 +19,15 @@ #define ICE_MAX_MACADDR_PER_VF 18 #define ICE_FLEX_DESC_RXDID_MAX_NUM 64 +/* VFs only get a single VSI. For ice hardware, the VF does not need to know + * its VSI index. However, the virtchnl interface requires a VSI number, + * mainly due to legacy hardware. + * + * Since the VF doesn't need this information, report a static value to the VF + * instead of leaking any information about the PF or hardware setup. 
+ */ +#define ICE_VF_VSI_ID 1 + struct ice_virtchnl_ops { int (*get_ver_msg)(struct ice_vf *vf, u8 *msg); int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg); diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c index 5e19d48a05b4..d796dbd2a440 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_allowlist.c @@ -13,8 +13,6 @@ * - opcodes needed by VF when caps are activated * * Caps that don't use new opcodes (no opcodes should be allowed): - * - VIRTCHNL_VF_OFFLOAD_RSS_AQ - * - VIRTCHNL_VF_OFFLOAD_RSS_REG * - VIRTCHNL_VF_OFFLOAD_WB_ON_ITR * - VIRTCHNL_VF_OFFLOAD_CRC * - VIRTCHNL_VF_OFFLOAD_RX_POLLING diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c index f001553e1a1a..8e4ff3af86c6 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c @@ -94,9 +94,6 @@ ice_vc_fdir_param_check(struct ice_vf *vf, u16 vsi_id) if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_FDIR_PF)) return -EINVAL; - if (vsi_id != vf->lan_vsi_num) - return -EINVAL; - if (!ice_vc_isvalid_vsi_id(vf, vsi_id)) return -EINVAL; diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 8a051420fa19..1857220d27fe 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -179,6 +179,10 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) return -EBUSY; usleep_range(1000, 2000); } + + ice_qvec_dis_irq(vsi, rx_ring, q_vector); + ice_qvec_toggle_napi(vsi, q_vector, false); + netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx)); ice_fill_txq_meta(vsi, tx_ring, &txq_meta); @@ -195,13 +199,10 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) if (err) return err; } - ice_qvec_dis_irq(vsi, rx_ring, q_vector); - err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true); if (err) return err; - ice_qvec_toggle_napi(vsi, q_vector, false); ice_qp_clean_rings(vsi, q_idx); ice_qp_reset_stats(vsi, q_idx); @@ -245,11 +246,11 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx) if (err) return err; - clear_bit(ICE_CFG_BUSY, vsi->state); ice_qvec_toggle_napi(vsi, q_vector, true); ice_qvec_ena_irq(vsi, q_vector); netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx)); + clear_bit(ICE_CFG_BUSY, vsi->state); return 0; } diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h index 0acc125decb3..e7a036538246 100644 --- a/drivers/net/ethernet/intel/idpf/idpf.h +++ b/drivers/net/ethernet/intel/idpf/idpf.h @@ -37,8 +37,6 @@ struct idpf_vport_max_q; #define IDPF_MB_MAX_ERR 20 #define IDPF_NUM_CHUNKS_PER_MSG(struct_sz, chunk_sz) \ ((IDPF_CTLQ_MAX_BUF_LEN - (struct_sz)) / (chunk_sz)) -#define IDPF_WAIT_FOR_EVENT_TIMEO_MIN 2000 -#define IDPF_WAIT_FOR_EVENT_TIMEO 60000 #define IDPF_MAX_WAIT 500 @@ -66,14 +64,12 @@ struct idpf_mac_filter { /** * enum idpf_state - State machine to handle bring up - * @__IDPF_STARTUP: Start the state machine * @__IDPF_VER_CHECK: Negotiate virtchnl version * @__IDPF_GET_CAPS: Negotiate capabilities * @__IDPF_INIT_SW: Init based on given capabilities * @__IDPF_STATE_LAST: Must be last, used to determine size */ enum idpf_state { - __IDPF_STARTUP, __IDPF_VER_CHECK, __IDPF_GET_CAPS, __IDPF_INIT_SW, @@ -87,6 +83,7 @@ enum idpf_state { * @IDPF_HR_RESET_IN_PROG: Reset in progress * @IDPF_REMOVE_IN_PROG: Driver remove in 
progress * @IDPF_MB_INTR_MODE: Mailbox in interrupt mode + * @IDPF_VC_CORE_INIT: virtchnl core has been init * @IDPF_FLAGS_NBITS: Must be last */ enum idpf_flags { @@ -95,6 +92,7 @@ enum idpf_flags { IDPF_HR_RESET_IN_PROG, IDPF_REMOVE_IN_PROG, IDPF_MB_INTR_MODE, + IDPF_VC_CORE_INIT, IDPF_FLAGS_NBITS, }; @@ -209,71 +207,6 @@ struct idpf_dev_ops { struct idpf_reg_ops reg_ops; }; -/* These macros allow us to generate an enum and a matching char * array of - * stringified enums that are always in sync. Checkpatch issues a bogus warning - * about this being a complex macro; but it's wrong, these are never used as a - * statement and instead only used to define the enum and array. - */ -#define IDPF_FOREACH_VPORT_VC_STATE(STATE) \ - STATE(IDPF_VC_CREATE_VPORT) \ - STATE(IDPF_VC_CREATE_VPORT_ERR) \ - STATE(IDPF_VC_ENA_VPORT) \ - STATE(IDPF_VC_ENA_VPORT_ERR) \ - STATE(IDPF_VC_DIS_VPORT) \ - STATE(IDPF_VC_DIS_VPORT_ERR) \ - STATE(IDPF_VC_DESTROY_VPORT) \ - STATE(IDPF_VC_DESTROY_VPORT_ERR) \ - STATE(IDPF_VC_CONFIG_TXQ) \ - STATE(IDPF_VC_CONFIG_TXQ_ERR) \ - STATE(IDPF_VC_CONFIG_RXQ) \ - STATE(IDPF_VC_CONFIG_RXQ_ERR) \ - STATE(IDPF_VC_ENA_QUEUES) \ - STATE(IDPF_VC_ENA_QUEUES_ERR) \ - STATE(IDPF_VC_DIS_QUEUES) \ - STATE(IDPF_VC_DIS_QUEUES_ERR) \ - STATE(IDPF_VC_MAP_IRQ) \ - STATE(IDPF_VC_MAP_IRQ_ERR) \ - STATE(IDPF_VC_UNMAP_IRQ) \ - STATE(IDPF_VC_UNMAP_IRQ_ERR) \ - STATE(IDPF_VC_ADD_QUEUES) \ - STATE(IDPF_VC_ADD_QUEUES_ERR) \ - STATE(IDPF_VC_DEL_QUEUES) \ - STATE(IDPF_VC_DEL_QUEUES_ERR) \ - STATE(IDPF_VC_ALLOC_VECTORS) \ - STATE(IDPF_VC_ALLOC_VECTORS_ERR) \ - STATE(IDPF_VC_DEALLOC_VECTORS) \ - STATE(IDPF_VC_DEALLOC_VECTORS_ERR) \ - STATE(IDPF_VC_SET_SRIOV_VFS) \ - STATE(IDPF_VC_SET_SRIOV_VFS_ERR) \ - STATE(IDPF_VC_GET_RSS_LUT) \ - STATE(IDPF_VC_GET_RSS_LUT_ERR) \ - STATE(IDPF_VC_SET_RSS_LUT) \ - STATE(IDPF_VC_SET_RSS_LUT_ERR) \ - STATE(IDPF_VC_GET_RSS_KEY) \ - STATE(IDPF_VC_GET_RSS_KEY_ERR) \ - STATE(IDPF_VC_SET_RSS_KEY) \ - STATE(IDPF_VC_SET_RSS_KEY_ERR) \ - STATE(IDPF_VC_GET_STATS) \ - STATE(IDPF_VC_GET_STATS_ERR) \ - STATE(IDPF_VC_ADD_MAC_ADDR) \ - STATE(IDPF_VC_ADD_MAC_ADDR_ERR) \ - STATE(IDPF_VC_DEL_MAC_ADDR) \ - STATE(IDPF_VC_DEL_MAC_ADDR_ERR) \ - STATE(IDPF_VC_GET_PTYPE_INFO) \ - STATE(IDPF_VC_GET_PTYPE_INFO_ERR) \ - STATE(IDPF_VC_LOOPBACK_STATE) \ - STATE(IDPF_VC_LOOPBACK_STATE_ERR) \ - STATE(IDPF_VC_NBITS) - -#define IDPF_GEN_ENUM(ENUM) ENUM, -#define IDPF_GEN_STRING(STRING) #STRING, - -enum idpf_vport_vc_state { - IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_ENUM) -}; - -extern const char * const idpf_vport_vc_state_str[]; - /** * enum idpf_vport_reset_cause - Vport soft reset causes * @IDPF_SR_Q_CHANGE: Soft reset queue change @@ -358,11 +291,7 @@ struct idpf_port_stats { * @port_stats: per port csum, header split, and other offload stats * @link_up: True if link is up * @link_speed_mbps: Link speed in mbps - * @vc_msg: Virtchnl message buffer - * @vc_state: Virtchnl message state - * @vchnl_wq: Wait queue for virtchnl messages * @sw_marker_wq: workqueue for marker packets - * @vc_buf_lock: Lock to protect virtchnl buffer */ struct idpf_vport { u16 num_txq; @@ -408,12 +337,7 @@ struct idpf_vport { bool link_up; u32 link_speed_mbps; - char vc_msg[IDPF_CTLQ_MAX_BUF_LEN]; - DECLARE_BITMAP(vc_state, IDPF_VC_NBITS); - - wait_queue_head_t vchnl_wq; wait_queue_head_t sw_marker_wq; - struct mutex vc_buf_lock; }; /** @@ -476,15 +400,11 @@ struct idpf_vport_user_config_data { * enum idpf_vport_config_flags - Vport config flags * @IDPF_VPORT_REG_NETDEV: Register netdev * @IDPF_VPORT_UP_REQUESTED: Set if 
interface up is requested on core reset - * @IDPF_VPORT_ADD_MAC_REQ: Asynchronous add ether address in flight - * @IDPF_VPORT_DEL_MAC_REQ: Asynchronous delete ether address in flight * @IDPF_VPORT_CONFIG_FLAGS_NBITS: Must be last */ enum idpf_vport_config_flags { IDPF_VPORT_REG_NETDEV, IDPF_VPORT_UP_REQUESTED, - IDPF_VPORT_ADD_MAC_REQ, - IDPF_VPORT_DEL_MAC_REQ, IDPF_VPORT_CONFIG_FLAGS_NBITS, }; @@ -555,11 +475,13 @@ struct idpf_vector_lifo { struct idpf_vport_config { struct idpf_vport_user_config_data user_config; struct idpf_vport_max_q max_q; - void *req_qs_chunks; + struct virtchnl2_add_queues *req_qs_chunks; spinlock_t mac_filter_list_lock; DECLARE_BITMAP(flags, IDPF_VPORT_CONFIG_FLAGS_NBITS); }; +struct idpf_vc_xn_manager; + /** * struct idpf_adapter - Device data struct generated on probe * @pdev: PCI device struct given on probe @@ -601,9 +523,7 @@ struct idpf_vport_config { * @stats_task: Periodic statistics retrieval task * @stats_wq: Workqueue for statistics task * @caps: Negotiated capabilities with device - * @vchnl_wq: Wait queue for virtchnl messages - * @vc_state: Virtchnl message state - * @vc_msg: Virtchnl message buffer + * @vcxn_mngr: Virtchnl transaction manager * @dev_ops: See idpf_dev_ops * @num_vfs: Number of allocated VFs through sysfs. PF does not directly talk * to VFs but is used to initialize them @@ -659,10 +579,8 @@ struct idpf_adapter { struct delayed_work stats_task; struct workqueue_struct *stats_wq; struct virtchnl2_get_capabilities caps; + struct idpf_vc_xn_manager *vcxn_mngr; - wait_queue_head_t vchnl_wq; - DECLARE_BITMAP(vc_state, IDPF_VC_NBITS); - char vc_msg[IDPF_CTLQ_MAX_BUF_LEN]; struct idpf_dev_ops dev_ops; int num_vfs; bool crc_enable; @@ -903,68 +821,18 @@ void idpf_mbx_task(struct work_struct *work); void idpf_vc_event_task(struct work_struct *work); void idpf_dev_ops_init(struct idpf_adapter *adapter); void idpf_vf_dev_ops_init(struct idpf_adapter *adapter); -int idpf_vport_adjust_qs(struct idpf_vport *vport); -int idpf_init_dflt_mbx(struct idpf_adapter *adapter); -void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter); -int idpf_vc_core_init(struct idpf_adapter *adapter); -void idpf_vc_core_deinit(struct idpf_adapter *adapter); int idpf_intr_req(struct idpf_adapter *adapter); void idpf_intr_rel(struct idpf_adapter *adapter); -int idpf_get_reg_intr_vecs(struct idpf_vport *vport, - struct idpf_vec_regs *reg_vals); u16 idpf_get_max_tx_hdr_size(struct idpf_adapter *adapter); -int idpf_send_delete_queues_msg(struct idpf_vport *vport); -int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q, - u16 num_complq, u16 num_rx_q, u16 num_rx_bufq); int idpf_initiate_soft_reset(struct idpf_vport *vport, enum idpf_vport_reset_cause reset_cause); -int idpf_send_enable_vport_msg(struct idpf_vport *vport); -int idpf_send_disable_vport_msg(struct idpf_vport *vport); -int idpf_send_destroy_vport_msg(struct idpf_vport *vport); -int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport); -int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport); -int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get); -int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get); -int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter); -int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors); void idpf_deinit_task(struct idpf_adapter *adapter); int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter, u16 *q_vector_idxs, struct idpf_vector_info *vec_info); -int idpf_vport_alloc_vec_indexes(struct 
idpf_vport *vport); -int idpf_send_get_stats_msg(struct idpf_vport *vport); -int idpf_get_vec_ids(struct idpf_adapter *adapter, - u16 *vecids, int num_vecids, - struct virtchnl2_vector_chunks *chunks); -int idpf_recv_mb_msg(struct idpf_adapter *adapter, u32 op, - void *msg, int msg_size); -int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op, - u16 msg_size, u8 *msg); void idpf_set_ethtool_ops(struct net_device *netdev); -int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter, - struct idpf_vport_max_q *max_q); -void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter, - struct idpf_vport_max_q *max_q); -int idpf_add_del_mac_filters(struct idpf_vport *vport, - struct idpf_netdev_priv *np, - bool add, bool async); -int idpf_set_promiscuous(struct idpf_adapter *adapter, - struct idpf_vport_user_config_data *config_data, - u32 vport_id); -int idpf_send_disable_queues_msg(struct idpf_vport *vport); -void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q); -u32 idpf_get_vport_id(struct idpf_vport *vport); -int idpf_vport_queue_ids_init(struct idpf_vport *vport); -int idpf_queue_reg_init(struct idpf_vport *vport); -int idpf_send_config_queues_msg(struct idpf_vport *vport); -int idpf_send_enable_queues_msg(struct idpf_vport *vport); -int idpf_send_create_vport_msg(struct idpf_adapter *adapter, - struct idpf_vport_max_q *max_q); -int idpf_check_supported_desc_ids(struct idpf_vport *vport); void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector, u16 itr, bool tx); -int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map); -int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs); int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs); u8 idpf_vport_get_hsplit(const struct idpf_vport *vport); diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.c b/drivers/net/ethernet/intel/idpf/idpf_controlq.c index c7f43d2fcd13..4849590a5591 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_controlq.c +++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.c @@ -516,6 +516,8 @@ post_buffs_out: /* Wrap to end of end ring since current ntp is 0 */ cq->next_to_post = cq->ring_size - 1; + dma_wmb(); + wr32(hw, cq->reg.tail, cq->next_to_post); } @@ -546,11 +548,6 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg, int err = 0; u16 i; - if (*num_q_msg == 0) - return 0; - else if (*num_q_msg > cq->ring_size) - return -EBADR; - /* take the lock before we start messing with the ring */ mutex_lock(&cq->cq_lock); diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h index 8dee098bbfb0..e8e046ef2f0d 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h +++ b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h @@ -69,6 +69,11 @@ struct idpf_ctlq_msg { u8 context[IDPF_INDIRECT_CTX_SIZE]; struct idpf_dma_mem *payload; } indirect; + struct { + u32 rsvd; + u16 data; + u16 flags; + } sw_cookie; } ctx; }; diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c index 34ad1ac46b78..3df9935685e9 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_dev.c +++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c @@ -3,6 +3,7 @@ #include "idpf.h" #include "idpf_lan_pf_regs.h" +#include "idpf_virtchnl.h" #define IDPF_PF_ITR_IDX_SPACING 0x4 diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index 58179bd733ff..5d3532c27d57 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ 
b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -2,14 +2,11 @@ /* Copyright (C) 2023 Intel Corporation */ #include "idpf.h" +#include "idpf_virtchnl.h" static const struct net_device_ops idpf_netdev_ops_splitq; static const struct net_device_ops idpf_netdev_ops_singleq; -const char * const idpf_vport_vc_state_str[] = { - IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_STRING) -}; - /** * idpf_init_vector_stack - Fill the MSIX vector stack with vector index * @adapter: private data struct @@ -82,19 +79,12 @@ static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter) */ void idpf_intr_rel(struct idpf_adapter *adapter) { - int err; - if (!adapter->msix_entries) return; idpf_mb_intr_rel_irq(adapter); pci_free_irq_vectors(adapter->pdev); - - err = idpf_send_dealloc_vectors_msg(adapter); - if (err) - dev_err(&adapter->pdev->dev, - "Failed to deallocate vectors: %d\n", err); - + idpf_send_dealloc_vectors_msg(adapter); idpf_deinit_vector_stack(adapter); kfree(adapter->msix_entries); adapter->msix_entries = NULL; @@ -975,7 +965,6 @@ static void idpf_vport_rel(struct idpf_vport *vport) struct idpf_rss_data *rss_data; struct idpf_vport_max_q max_q; u16 idx = vport->idx; - int i; vport_config = adapter->vport_config[vport->idx]; idpf_deinit_rss(vport); @@ -985,20 +974,6 @@ static void idpf_vport_rel(struct idpf_vport *vport) idpf_send_destroy_vport_msg(vport); - /* Set all bits as we dont know on which vc_state the vport vhnl_wq - * is waiting on and wakeup the virtchnl workqueue even if it is - * waiting for the response as we are going down - */ - for (i = 0; i < IDPF_VC_NBITS; i++) - set_bit(i, vport->vc_state); - wake_up(&vport->vchnl_wq); - - mutex_destroy(&vport->vc_buf_lock); - - /* Clear all the bits */ - for (i = 0; i < IDPF_VC_NBITS; i++) - clear_bit(i, vport->vc_state); - /* Release all max queues allocated to the adapter's pool */ max_q.max_rxq = vport_config->max_q.max_rxq; max_q.max_txq = vport_config->max_q.max_txq; @@ -1253,7 +1228,7 @@ void idpf_mbx_task(struct work_struct *work) queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, msecs_to_jiffies(300)); - idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_UNKNOWN, NULL, 0); + idpf_recv_mb_msg(adapter); } /** @@ -1543,9 +1518,7 @@ void idpf_init_task(struct work_struct *work) vport_config = adapter->vport_config[index]; init_waitqueue_head(&vport->sw_marker_wq); - init_waitqueue_head(&vport->vchnl_wq); - mutex_init(&vport->vc_buf_lock); spin_lock_init(&vport_config->mac_filter_list_lock); INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list); @@ -1823,6 +1796,8 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter) goto unlock_mutex; } + queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0); + /* Initialize the state machine, also allocate memory and request * resources */ @@ -1902,7 +1877,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport, * mess with. Nothing below should use those variables from new_vport * and should instead always refer to them in vport if they need to. */ - memcpy(new_vport, vport, offsetof(struct idpf_vport, vc_state)); + memcpy(new_vport, vport, offsetof(struct idpf_vport, link_speed_mbps)); /* Adjust resource parameters prior to reallocating resources */ switch (reset_cause) { @@ -1951,7 +1926,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport, /* Same comment as above regarding avoiding copying the wait_queues and * mutexes applies here. We do not want to mess with those if possible. 
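 * (this patch drops the per-vport vc_state bitmap and vchnl_wq, which is why
 * the copy bound below moves from offsetof(..., vc_state) to
 * offsetof(..., link_speed_mbps))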
*/ - memcpy(vport, new_vport, offsetof(struct idpf_vport, vc_state)); + memcpy(vport, new_vport, offsetof(struct idpf_vport, link_speed_mbps)); /* Since idpf_vport_queues_alloc was called with new_port, the queue * back pointers are currently pointing to the local new_vport. Reset diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c index e1febc74cefd..f784eea044bd 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_main.c +++ b/drivers/net/ethernet/intel/idpf/idpf_main.c @@ -3,6 +3,7 @@ #include "idpf.h" #include "idpf_devids.h" +#include "idpf_virtchnl.h" #define DRV_SUMMARY "Intel(R) Infrastructure Data Path Function Linux Driver" @@ -30,6 +31,7 @@ static void idpf_remove(struct pci_dev *pdev) idpf_sriov_configure(pdev, 0); idpf_vc_core_deinit(adapter); + /* Be a good citizen and leave the device clean on exit */ adapter->dev_ops.reg_ops.trigger_reset(adapter, IDPF_HR_FUNC_RESET); idpf_deinit_dflt_mbx(adapter); @@ -66,6 +68,8 @@ destroy_wqs: adapter->vport_config = NULL; kfree(adapter->netdevs); adapter->netdevs = NULL; + kfree(adapter->vcxn_mngr); + adapter->vcxn_mngr = NULL; mutex_destroy(&adapter->vport_ctrl_lock); mutex_destroy(&adapter->vector_lock); @@ -229,8 +233,6 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) mutex_init(&adapter->queue_lock); mutex_init(&adapter->vc_buf_lock); - init_waitqueue_head(&adapter->vchnl_wq); - INIT_DELAYED_WORK(&adapter->init_task, idpf_init_task); INIT_DELAYED_WORK(&adapter->serv_task, idpf_service_task); INIT_DELAYED_WORK(&adapter->mbx_task, idpf_mbx_task); diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index 2f8ad79ae3f0..6dd7a66bb897 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -2,6 +2,7 @@ /* Copyright (C) 2023 Intel Corporation */ #include "idpf.h" +#include "idpf_virtchnl.h" /** * idpf_buf_lifo_push - push a buffer pointer onto stack diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c index 8ade4e3a9fe1..629cb5cb7c9f 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c +++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c @@ -3,6 +3,7 @@ #include "idpf.h" #include "idpf_lan_vf_regs.h" +#include "idpf_virtchnl.h" #define IDPF_VF_ITR_IDX_SPACING 0x40 @@ -137,7 +138,7 @@ static void idpf_vf_trigger_reset(struct idpf_adapter *adapter, /* Do not send VIRTCHNL2_OP_RESET_VF message on driver unload */ if (trig_cause == IDPF_HR_FUNC_RESET && !test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) - idpf_send_mb_msg(adapter, VIRTCHNL2_OP_RESET_VF, 0, NULL); + idpf_send_mb_msg(adapter, VIRTCHNL2_OP_RESET_VF, 0, NULL, 0); } /** diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c index d0cdd63b3d5b..a5f9b7a5effe 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c @@ -2,46 +2,192 @@ /* Copyright (C) 2023 Intel Corporation */ #include "idpf.h" +#include "idpf_virtchnl.h" + +#define IDPF_VC_XN_MIN_TIMEOUT_MSEC 2000 +#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC (60 * 1000) +#define IDPF_VC_XN_IDX_M GENMASK(7, 0) +#define IDPF_VC_XN_SALT_M GENMASK(15, 8) +#define IDPF_VC_XN_RING_LEN U8_MAX + +/** + * enum idpf_vc_xn_state - Virtchnl transaction status + * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used + * @IDPF_VC_XN_WAITING: expecting a reply, not yet received + * 
@IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received, + * buffer updated + * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there + * was an error, buffer not updated + * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down + * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the + * return context; a callback may be provided to handle + * return + */ +enum idpf_vc_xn_state { + IDPF_VC_XN_IDLE = 1, + IDPF_VC_XN_WAITING, + IDPF_VC_XN_COMPLETED_SUCCESS, + IDPF_VC_XN_COMPLETED_FAILED, + IDPF_VC_XN_SHUTDOWN, + IDPF_VC_XN_ASYNC, +}; + +struct idpf_vc_xn; +/* Callback for asynchronous messages */ +typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *, + const struct idpf_ctlq_msg *); + +/** + * struct idpf_vc_xn - Data structure representing virtchnl transactions + * @completed: virtchnl event loop uses that to signal when a reply is + * available, uses kernel completion API + * @state: virtchnl event loop stores the data below, protected by the + * completion's lock. + * @reply_sz: Original size of reply, may be > reply_buf.iov_len; it will be + * truncated on its way to the receiver thread according to + * reply_buf.iov_len. + * @reply: Reference to the buffer(s) where the reply data should be written + * to. May be 0-length (then NULL address permitted) if the reply data + * should be ignored. + * @async_handler: if sent asynchronously, a callback can be provided to handle + * the reply when it's received + * @vc_op: corresponding opcode sent with this transaction + * @idx: index used as retrieval on reply receive, used for cookie + * @salt: changed every message to make unique, used for cookie + */ +struct idpf_vc_xn { + struct completion completed; + enum idpf_vc_xn_state state; + size_t reply_sz; + struct kvec reply; + async_vc_cb async_handler; + u32 vc_op; + u8 idx; + u8 salt; +}; + +/** + * struct idpf_vc_xn_params - Parameters for executing transaction + * @send_buf: kvec for send buffer + * @recv_buf: kvec for recv buffer, may be NULL, must then have zero length + * @timeout_ms: timeout to wait for reply + * @async: send message asynchronously, will not wait on completion + * @async_handler: If sent asynchronously, optional callback handler. The user + * must be careful when using async handlers as the memory for + * the recv_buf _cannot_ be on stack if this is async. + * @vc_op: virtchnl op to send + */ +struct idpf_vc_xn_params { + struct kvec send_buf; + struct kvec recv_buf; + int timeout_ms; + bool async; + async_vc_cb async_handler; + u32 vc_op; +}; + +/** + * struct idpf_vc_xn_manager - Manager for tracking transactions + * @ring: backing and lookup for transactions + * @free_xn_bm: bitmap for free transactions + * @xn_bm_lock: make bitmap access synchronous where necessary + * @salt: used to make cookie unique every message + */ +struct idpf_vc_xn_manager { + struct idpf_vc_xn ring[IDPF_VC_XN_RING_LEN]; + DECLARE_BITMAP(free_xn_bm, IDPF_VC_XN_RING_LEN); + spinlock_t xn_bm_lock; + u8 salt; +}; + +/** + * idpf_vid_to_vport - Translate vport id to vport pointer + * @adapter: private data struct + * @v_id: vport id to translate + * + * Returns vport matching v_id, NULL if not found. 
+ */ +static +struct idpf_vport *idpf_vid_to_vport(struct idpf_adapter *adapter, u32 v_id) +{ + u16 num_max_vports = idpf_get_max_vports(adapter); + int i; + + for (i = 0; i < num_max_vports; i++) + if (adapter->vport_ids[i] == v_id) + return adapter->vports[i]; + + return NULL; +} + +/** + * idpf_handle_event_link - Handle link event message + * @adapter: private data struct + * @v2e: virtchnl event message + */ +static void idpf_handle_event_link(struct idpf_adapter *adapter, + const struct virtchnl2_event *v2e) +{ + struct idpf_netdev_priv *np; + struct idpf_vport *vport; + + vport = idpf_vid_to_vport(adapter, le32_to_cpu(v2e->vport_id)); + if (!vport) { + dev_err_ratelimited(&adapter->pdev->dev, "Failed to find vport_id %d for link event\n", + v2e->vport_id); + return; + } + np = netdev_priv(vport->netdev); + + vport->link_speed_mbps = le32_to_cpu(v2e->link_speed); + + if (vport->link_up == v2e->link_status) + return; + + vport->link_up = v2e->link_status; + + if (np->state != __IDPF_VPORT_UP) + return; + + if (vport->link_up) { + netif_tx_start_all_queues(vport->netdev); + netif_carrier_on(vport->netdev); + } else { + netif_tx_stop_all_queues(vport->netdev); + netif_carrier_off(vport->netdev); + } +} /** * idpf_recv_event_msg - Receive virtchnl event message - * @vport: virtual port structure + * @adapter: Driver specific private structure * @ctlq_msg: message to copy from * * Receive virtchnl event message */ -static void idpf_recv_event_msg(struct idpf_vport *vport, +static void idpf_recv_event_msg(struct idpf_adapter *adapter, struct idpf_ctlq_msg *ctlq_msg) { - struct idpf_netdev_priv *np = netdev_priv(vport->netdev); + int payload_size = ctlq_msg->ctx.indirect.payload->size; struct virtchnl2_event *v2e; - bool link_status; u32 event; + if (payload_size < sizeof(*v2e)) { + dev_err_ratelimited(&adapter->pdev->dev, "Failed to receive valid payload for event msg (op %d len %d)\n", + ctlq_msg->cookie.mbx.chnl_opcode, + payload_size); + return; + } + v2e = (struct virtchnl2_event *)ctlq_msg->ctx.indirect.payload->va; event = le32_to_cpu(v2e->event); switch (event) { case VIRTCHNL2_EVENT_LINK_CHANGE: - vport->link_speed_mbps = le32_to_cpu(v2e->link_speed); - link_status = v2e->link_status; - - if (vport->link_up == link_status) - break; - - vport->link_up = link_status; - if (np->state == __IDPF_VPORT_UP) { - if (vport->link_up) { - netif_carrier_on(vport->netdev); - netif_tx_start_all_queues(vport->netdev); - } else { - netif_tx_stop_all_queues(vport->netdev); - netif_carrier_off(vport->netdev); - } - } - break; + idpf_handle_event_link(adapter, v2e); + return; default: - dev_err(&vport->adapter->pdev->dev, + dev_err(&adapter->pdev->dev, "Unknown event %d from PF\n", event); break; } @@ -93,13 +239,14 @@ err_kfree: * @op: virtchnl opcode * @msg_size: size of the payload * @msg: pointer to buffer holding the payload + * @cookie: unique SW generated cookie per message * * Will prepare the control queue message and initiates the send api * * Returns 0 on success, negative on failure */ int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op, - u16 msg_size, u8 *msg) + u16 msg_size, u8 *msg, u16 cookie) { struct idpf_ctlq_msg *ctlq_msg; struct idpf_dma_mem *dma_mem; @@ -139,8 +286,12 @@ int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op, err = -ENOMEM; goto dma_alloc_error; } - memcpy(dma_mem->va, msg, msg_size); + + /* It's possible we're just sending an opcode but no buffer */ + if (msg && msg_size) + memcpy(dma_mem->va, msg, msg_size); ctlq_msg->ctx.indirect.payload = dma_mem; 
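 /* Note on the cookie stored just below: the 16-bit sw_cookie.data carries
  * the transaction ring index in bits 7:0 (IDPF_VC_XN_IDX_M) and a
  * per-message salt in bits 15:8 (IDPF_VC_XN_SALT_M); the receive path
  * recovers both with FIELD_GET() to locate and validate the transaction.
  */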
+ ctlq_msg->ctx.sw_cookie.data = cookie; err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg); if (err) @@ -159,592 +310,432 @@ dma_mem_error: return err; } -/** - * idpf_find_vport - Find vport pointer from control queue message - * @adapter: driver specific private structure - * @vport: address of vport pointer to copy the vport from adapters vport list - * @ctlq_msg: control queue message +/* API for virtchnl "transaction" support ("xn" for short). * - * Return 0 on success, error value on failure. Also this function does check - * for the opcodes which expect to receive payload and return error value if - * it is not the case. + * We are reusing the completion lock to serialize the accesses to the + * transaction state for simplicity, but it could be its own separate synchro + * as well. For now, this API is only used from within a workqueue context; + * raw_spin_lock() is enough. */ -static int idpf_find_vport(struct idpf_adapter *adapter, - struct idpf_vport **vport, - struct idpf_ctlq_msg *ctlq_msg) -{ - bool no_op = false, vid_found = false; - int i, err = 0; - char *vc_msg; - u32 v_id; +/** + * idpf_vc_xn_lock - Request exclusive access to vc transaction + * @xn: struct idpf_vc_xn* to access + */ +#define idpf_vc_xn_lock(xn) \ + raw_spin_lock(&(xn)->completed.wait.lock) - vc_msg = kcalloc(IDPF_CTLQ_MAX_BUF_LEN, sizeof(char), GFP_KERNEL); - if (!vc_msg) - return -ENOMEM; +/** + * idpf_vc_xn_unlock - Release exclusive access to vc transaction + * @xn: struct idpf_vc_xn* to access + */ +#define idpf_vc_xn_unlock(xn) \ + raw_spin_unlock(&(xn)->completed.wait.lock) - if (ctlq_msg->data_len) { - size_t payload_size = ctlq_msg->ctx.indirect.payload->size; +/** + * idpf_vc_xn_release_bufs - Release reference to reply buffer(s) and + * reset the transaction state. 
+ * @xn: struct idpf_vc_xn to update + */ +static void idpf_vc_xn_release_bufs(struct idpf_vc_xn *xn) +{ + xn->reply.iov_base = NULL; + xn->reply.iov_len = 0; - if (!payload_size) { - dev_err(&adapter->pdev->dev, "Failed to receive payload buffer\n"); - kfree(vc_msg); + if (xn->state != IDPF_VC_XN_SHUTDOWN) + xn->state = IDPF_VC_XN_IDLE; +} - return -EINVAL; - } +/** + * idpf_vc_xn_init - Initialize virtchnl transaction object + * @vcxn_mngr: pointer to vc transaction manager struct + */ +static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr) +{ + int i; - memcpy(vc_msg, ctlq_msg->ctx.indirect.payload->va, - min_t(size_t, payload_size, IDPF_CTLQ_MAX_BUF_LEN)); - } - - switch (ctlq_msg->cookie.mbx.chnl_opcode) { - case VIRTCHNL2_OP_VERSION: - case VIRTCHNL2_OP_GET_CAPS: - case VIRTCHNL2_OP_CREATE_VPORT: - case VIRTCHNL2_OP_SET_SRIOV_VFS: - case VIRTCHNL2_OP_ALLOC_VECTORS: - case VIRTCHNL2_OP_DEALLOC_VECTORS: - case VIRTCHNL2_OP_GET_PTYPE_INFO: - goto free_vc_msg; - case VIRTCHNL2_OP_ENABLE_VPORT: - case VIRTCHNL2_OP_DISABLE_VPORT: - case VIRTCHNL2_OP_DESTROY_VPORT: - v_id = le32_to_cpu(((struct virtchnl2_vport *)vc_msg)->vport_id); - break; - case VIRTCHNL2_OP_CONFIG_TX_QUEUES: - v_id = le32_to_cpu(((struct virtchnl2_config_tx_queues *)vc_msg)->vport_id); - break; - case VIRTCHNL2_OP_CONFIG_RX_QUEUES: - v_id = le32_to_cpu(((struct virtchnl2_config_rx_queues *)vc_msg)->vport_id); - break; - case VIRTCHNL2_OP_ENABLE_QUEUES: - case VIRTCHNL2_OP_DISABLE_QUEUES: - case VIRTCHNL2_OP_DEL_QUEUES: - v_id = le32_to_cpu(((struct virtchnl2_del_ena_dis_queues *)vc_msg)->vport_id); - break; - case VIRTCHNL2_OP_ADD_QUEUES: - v_id = le32_to_cpu(((struct virtchnl2_add_queues *)vc_msg)->vport_id); - break; - case VIRTCHNL2_OP_MAP_QUEUE_VECTOR: - case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR: - v_id = le32_to_cpu(((struct virtchnl2_queue_vector_maps *)vc_msg)->vport_id); - break; - case VIRTCHNL2_OP_GET_STATS: - v_id = le32_to_cpu(((struct virtchnl2_vport_stats *)vc_msg)->vport_id); - break; - case VIRTCHNL2_OP_GET_RSS_LUT: - case VIRTCHNL2_OP_SET_RSS_LUT: - v_id = le32_to_cpu(((struct virtchnl2_rss_lut *)vc_msg)->vport_id); - break; - case VIRTCHNL2_OP_GET_RSS_KEY: - case VIRTCHNL2_OP_SET_RSS_KEY: - v_id = le32_to_cpu(((struct virtchnl2_rss_key *)vc_msg)->vport_id); - break; - case VIRTCHNL2_OP_EVENT: - v_id = le32_to_cpu(((struct virtchnl2_event *)vc_msg)->vport_id); - break; - case VIRTCHNL2_OP_LOOPBACK: - v_id = le32_to_cpu(((struct virtchnl2_loopback *)vc_msg)->vport_id); - break; - case VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE: - v_id = le32_to_cpu(((struct virtchnl2_promisc_info *)vc_msg)->vport_id); - break; - case VIRTCHNL2_OP_ADD_MAC_ADDR: - case VIRTCHNL2_OP_DEL_MAC_ADDR: - v_id = le32_to_cpu(((struct virtchnl2_mac_addr_list *)vc_msg)->vport_id); - break; - default: - no_op = true; - break; - } + spin_lock_init(&vcxn_mngr->xn_bm_lock); - if (no_op) - goto free_vc_msg; + for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) { + struct idpf_vc_xn *xn = &vcxn_mngr->ring[i]; - for (i = 0; i < idpf_get_max_vports(adapter); i++) { - if (adapter->vport_ids[i] == v_id) { - vid_found = true; - break; - } + xn->state = IDPF_VC_XN_IDLE; + xn->idx = i; + idpf_vc_xn_release_bufs(xn); + init_completion(&xn->completed); } - if (vid_found) - *vport = adapter->vports[i]; - else - err = -EINVAL; - -free_vc_msg: - kfree(vc_msg); - - return err; + bitmap_fill(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN); } /** - * idpf_copy_data_to_vc_buf - Copy the virtchnl response data into the buffer. 
- * @adapter: driver specific private structure - * @vport: virtual port structure - * @ctlq_msg: msg to copy from - * @err_enum: err bit to set on error + * idpf_vc_xn_shutdown - Uninitialize virtchnl transaction object + * @vcxn_mngr: pointer to vc transaction manager struct * - * Copies the payload from ctlq_msg into virtchnl buffer. Returns 0 on success, - * negative on failure. + * All waiting threads will be woken-up and their transaction aborted. Further + * operations on that object will fail. */ -static int idpf_copy_data_to_vc_buf(struct idpf_adapter *adapter, - struct idpf_vport *vport, - struct idpf_ctlq_msg *ctlq_msg, - enum idpf_vport_vc_state err_enum) +static void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr) { - if (ctlq_msg->cookie.mbx.chnl_retval) { - if (vport) - set_bit(err_enum, vport->vc_state); - else - set_bit(err_enum, adapter->vc_state); + int i; - return -EINVAL; - } + spin_lock_bh(&vcxn_mngr->xn_bm_lock); + bitmap_zero(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN); + spin_unlock_bh(&vcxn_mngr->xn_bm_lock); - if (vport) - memcpy(vport->vc_msg, ctlq_msg->ctx.indirect.payload->va, - min_t(int, ctlq_msg->ctx.indirect.payload->size, - IDPF_CTLQ_MAX_BUF_LEN)); - else - memcpy(adapter->vc_msg, ctlq_msg->ctx.indirect.payload->va, - min_t(int, ctlq_msg->ctx.indirect.payload->size, - IDPF_CTLQ_MAX_BUF_LEN)); + for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) { + struct idpf_vc_xn *xn = &vcxn_mngr->ring[i]; - return 0; + idpf_vc_xn_lock(xn); + xn->state = IDPF_VC_XN_SHUTDOWN; + idpf_vc_xn_release_bufs(xn); + idpf_vc_xn_unlock(xn); + complete_all(&xn->completed); + } } /** - * idpf_recv_vchnl_op - helper function with common logic when handling the - * reception of VIRTCHNL OPs. - * @adapter: driver specific private structure - * @vport: virtual port structure - * @ctlq_msg: msg to copy from - * @state: state bit used on timeout check - * @err_state: err bit to set on error + * idpf_vc_xn_pop_free - Pop a free transaction from free list + * @vcxn_mngr: transaction manager to pop from + * + * Returns NULL if no free transactions */ -static void idpf_recv_vchnl_op(struct idpf_adapter *adapter, - struct idpf_vport *vport, - struct idpf_ctlq_msg *ctlq_msg, - enum idpf_vport_vc_state state, - enum idpf_vport_vc_state err_state) +static +struct idpf_vc_xn *idpf_vc_xn_pop_free(struct idpf_vc_xn_manager *vcxn_mngr) { - wait_queue_head_t *vchnl_wq; - int err; + struct idpf_vc_xn *xn = NULL; + unsigned long free_idx; - if (vport) - vchnl_wq = &vport->vchnl_wq; - else - vchnl_wq = &adapter->vchnl_wq; + spin_lock_bh(&vcxn_mngr->xn_bm_lock); + free_idx = find_first_bit(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN); + if (free_idx == IDPF_VC_XN_RING_LEN) + goto do_unlock; - err = idpf_copy_data_to_vc_buf(adapter, vport, ctlq_msg, err_state); - if (wq_has_sleeper(vchnl_wq)) { - if (vport) - set_bit(state, vport->vc_state); - else - set_bit(state, adapter->vc_state); + clear_bit(free_idx, vcxn_mngr->free_xn_bm); + xn = &vcxn_mngr->ring[free_idx]; + xn->salt = vcxn_mngr->salt++; - wake_up(vchnl_wq); - } else { - if (!err) { - dev_warn(&adapter->pdev->dev, "opcode %d received without waiting thread\n", - ctlq_msg->cookie.mbx.chnl_opcode); - } else { - /* Clear the errors since there is no sleeper to pass - * them on - */ - if (vport) - clear_bit(err_state, vport->vc_state); - else - clear_bit(err_state, adapter->vc_state); - } - } +do_unlock: + spin_unlock_bh(&vcxn_mngr->xn_bm_lock); + + return xn; } /** - * idpf_recv_mb_msg - Receive message over mailbox - * @adapter: Driver 
specific private structure - * @op: virtchannel operation code - * @msg: Received message holding buffer - * @msg_size: message size - * - * Will receive control queue message and posts the receive buffer. Returns 0 - * on success and negative on failure. + * idpf_vc_xn_push_free - Push a free transaction to free list + * @vcxn_mngr: transaction manager to push to + * @xn: transaction to push */ -int idpf_recv_mb_msg(struct idpf_adapter *adapter, u32 op, - void *msg, int msg_size) +static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr, + struct idpf_vc_xn *xn) { - struct idpf_vport *vport = NULL; - struct idpf_ctlq_msg ctlq_msg; - struct idpf_dma_mem *dma_mem; - bool work_done = false; - int num_retry = 2000; - u16 num_q_msg; - int err; - - while (1) { - struct idpf_vport_config *vport_config; - int payload_size = 0; - - /* Try to get one message */ - num_q_msg = 1; - dma_mem = NULL; - err = idpf_ctlq_recv(adapter->hw.arq, &num_q_msg, &ctlq_msg); - /* If no message then decide if we have to retry based on - * opcode - */ - if (err || !num_q_msg) { - /* Increasing num_retry to consider the delayed - * responses because of large number of VF's mailbox - * messages. If the mailbox message is received from - * the other side, we come out of the sleep cycle - * immediately else we wait for more time. - */ - if (!op || !num_retry--) - break; - if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) { - err = -EIO; - break; - } - msleep(20); - continue; - } + idpf_vc_xn_release_bufs(xn); + set_bit(xn->idx, vcxn_mngr->free_xn_bm); +} - /* If we are here a message is received. Check if we are looking - * for a specific message based on opcode. If it is different - * ignore and post buffers +/** + * idpf_vc_xn_exec - Perform a send/recv virtchnl transaction + * @adapter: driver specific private structure with vcxn_mngr + * @params: parameters for this particular transaction including + * -vc_op: virtchannel operation to send + * -send_buf: kvec iov for send buf and len + * -recv_buf: kvec iov for recv buf and len (ignored if NULL) + * -timeout_ms: timeout waiting for a reply (milliseconds) + * -async: don't wait for message reply, will lose caller context + * -async_handler: callback to handle async replies + * + * @returns >= 0 for success, the size of the initial reply (may or may not be + * >= @recv_buf.iov_len, but we never overflow @recv_buf.iov_base). < 0 for + * error. + */ +static ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter, + const struct idpf_vc_xn_params *params) +{ + const struct kvec *send_buf = &params->send_buf; + struct idpf_vc_xn *xn; + ssize_t retval; + u16 cookie; + + xn = idpf_vc_xn_pop_free(adapter->vcxn_mngr); + /* no free transactions available */ + if (!xn) + return -ENOSPC; + + idpf_vc_xn_lock(xn); + if (xn->state == IDPF_VC_XN_SHUTDOWN) { + retval = -ENXIO; + goto only_unlock; + } else if (xn->state != IDPF_VC_XN_IDLE) { + /* We're just going to clobber this transaction even though + * it's not IDLE. If we don't reuse it we could theoretically + * eventually leak all the free transactions and not be able to + * send any messages. At least this way we make an attempt to + * remain functional even though something really bad is + * happening that's corrupting what was supposed to be free + * transactions. 
*/ - if (op && ctlq_msg.cookie.mbx.chnl_opcode != op) - goto post_buffs; + WARN_ONCE(1, "There should only be idle transactions in free list (idx %d op %d)\n", + xn->idx, xn->vc_op); + } - err = idpf_find_vport(adapter, &vport, &ctlq_msg); - if (err) - goto post_buffs; + xn->reply = params->recv_buf; + xn->reply_sz = 0; + xn->state = params->async ? IDPF_VC_XN_ASYNC : IDPF_VC_XN_WAITING; + xn->vc_op = params->vc_op; + xn->async_handler = params->async_handler; + idpf_vc_xn_unlock(xn); - if (ctlq_msg.data_len) - payload_size = ctlq_msg.ctx.indirect.payload->size; + if (!params->async) + reinit_completion(&xn->completed); + cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) | + FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx); - /* All conditions are met. Either a message requested is - * received or we received a message to be processed - */ - switch (ctlq_msg.cookie.mbx.chnl_opcode) { - case VIRTCHNL2_OP_VERSION: - case VIRTCHNL2_OP_GET_CAPS: - if (ctlq_msg.cookie.mbx.chnl_retval) { - dev_err(&adapter->pdev->dev, "Failure initializing, vc op: %u retval: %u\n", - ctlq_msg.cookie.mbx.chnl_opcode, - ctlq_msg.cookie.mbx.chnl_retval); - err = -EBADMSG; - } else if (msg) { - memcpy(msg, ctlq_msg.ctx.indirect.payload->va, - min_t(int, payload_size, msg_size)); - } - work_done = true; - break; - case VIRTCHNL2_OP_CREATE_VPORT: - idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg, - IDPF_VC_CREATE_VPORT, - IDPF_VC_CREATE_VPORT_ERR); - break; - case VIRTCHNL2_OP_ENABLE_VPORT: - idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, - IDPF_VC_ENA_VPORT, - IDPF_VC_ENA_VPORT_ERR); - break; - case VIRTCHNL2_OP_DISABLE_VPORT: - idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, - IDPF_VC_DIS_VPORT, - IDPF_VC_DIS_VPORT_ERR); - break; - case VIRTCHNL2_OP_DESTROY_VPORT: - idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, - IDPF_VC_DESTROY_VPORT, - IDPF_VC_DESTROY_VPORT_ERR); - break; - case VIRTCHNL2_OP_CONFIG_TX_QUEUES: - idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, - IDPF_VC_CONFIG_TXQ, - IDPF_VC_CONFIG_TXQ_ERR); - break; - case VIRTCHNL2_OP_CONFIG_RX_QUEUES: - idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, - IDPF_VC_CONFIG_RXQ, - IDPF_VC_CONFIG_RXQ_ERR); - break; - case VIRTCHNL2_OP_ENABLE_QUEUES: - idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, - IDPF_VC_ENA_QUEUES, - IDPF_VC_ENA_QUEUES_ERR); - break; - case VIRTCHNL2_OP_DISABLE_QUEUES: - idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, - IDPF_VC_DIS_QUEUES, - IDPF_VC_DIS_QUEUES_ERR); - break; - case VIRTCHNL2_OP_ADD_QUEUES: - idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, - IDPF_VC_ADD_QUEUES, - IDPF_VC_ADD_QUEUES_ERR); - break; - case VIRTCHNL2_OP_DEL_QUEUES: - idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, - IDPF_VC_DEL_QUEUES, - IDPF_VC_DEL_QUEUES_ERR); - break; - case VIRTCHNL2_OP_MAP_QUEUE_VECTOR: - idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, - IDPF_VC_MAP_IRQ, - IDPF_VC_MAP_IRQ_ERR); - break; - case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR: - idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, - IDPF_VC_UNMAP_IRQ, - IDPF_VC_UNMAP_IRQ_ERR); - break; - case VIRTCHNL2_OP_GET_STATS: - idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, - IDPF_VC_GET_STATS, - IDPF_VC_GET_STATS_ERR); - break; - case VIRTCHNL2_OP_GET_RSS_LUT: - idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, - IDPF_VC_GET_RSS_LUT, - IDPF_VC_GET_RSS_LUT_ERR); - break; - case VIRTCHNL2_OP_SET_RSS_LUT: - idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, - IDPF_VC_SET_RSS_LUT, - IDPF_VC_SET_RSS_LUT_ERR); - break; - case VIRTCHNL2_OP_GET_RSS_KEY: - idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, - IDPF_VC_GET_RSS_KEY, - IDPF_VC_GET_RSS_KEY_ERR); - break; - case 
VIRTCHNL2_OP_SET_RSS_KEY: - idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, - IDPF_VC_SET_RSS_KEY, - IDPF_VC_SET_RSS_KEY_ERR); - break; - case VIRTCHNL2_OP_SET_SRIOV_VFS: - idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg, - IDPF_VC_SET_SRIOV_VFS, - IDPF_VC_SET_SRIOV_VFS_ERR); - break; - case VIRTCHNL2_OP_ALLOC_VECTORS: - idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg, - IDPF_VC_ALLOC_VECTORS, - IDPF_VC_ALLOC_VECTORS_ERR); - break; - case VIRTCHNL2_OP_DEALLOC_VECTORS: - idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg, - IDPF_VC_DEALLOC_VECTORS, - IDPF_VC_DEALLOC_VECTORS_ERR); - break; - case VIRTCHNL2_OP_GET_PTYPE_INFO: - idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg, - IDPF_VC_GET_PTYPE_INFO, - IDPF_VC_GET_PTYPE_INFO_ERR); - break; - case VIRTCHNL2_OP_LOOPBACK: - idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, - IDPF_VC_LOOPBACK_STATE, - IDPF_VC_LOOPBACK_STATE_ERR); - break; - case VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE: - /* This message can only be sent asynchronously. As - * such we'll have lost the context in which it was - * called and thus can only really report if it looks - * like an error occurred. Don't bother setting ERR bit - * or waking chnl_wq since no work queue will be waiting - * to read the message. - */ - if (ctlq_msg.cookie.mbx.chnl_retval) { - dev_err(&adapter->pdev->dev, "Failed to set promiscuous mode: %d\n", - ctlq_msg.cookie.mbx.chnl_retval); - } - break; - case VIRTCHNL2_OP_ADD_MAC_ADDR: - vport_config = adapter->vport_config[vport->idx]; - if (test_and_clear_bit(IDPF_VPORT_ADD_MAC_REQ, - vport_config->flags)) { - /* Message was sent asynchronously. We don't - * normally print errors here, instead - * prefer to handle errors in the function - * calling wait_for_event. However, if - * asynchronous, the context in which the - * message was sent is lost. We can't really do - * anything about at it this point, but we - * should at a minimum indicate that it looks - * like something went wrong. Also don't bother - * setting ERR bit or waking vchnl_wq since no - * one will be waiting to read the async - * message. 
- */ - if (ctlq_msg.cookie.mbx.chnl_retval) - dev_err(&adapter->pdev->dev, "Failed to add MAC address: %d\n", - ctlq_msg.cookie.mbx.chnl_retval); - break; - } - idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, - IDPF_VC_ADD_MAC_ADDR, - IDPF_VC_ADD_MAC_ADDR_ERR); - break; - case VIRTCHNL2_OP_DEL_MAC_ADDR: - vport_config = adapter->vport_config[vport->idx]; - if (test_and_clear_bit(IDPF_VPORT_DEL_MAC_REQ, - vport_config->flags)) { - /* Message was sent asynchronously like the - * VIRTCHNL2_OP_ADD_MAC_ADDR - */ - if (ctlq_msg.cookie.mbx.chnl_retval) - dev_err(&adapter->pdev->dev, "Failed to delete MAC address: %d\n", - ctlq_msg.cookie.mbx.chnl_retval); - break; - } - idpf_recv_vchnl_op(adapter, vport, &ctlq_msg, - IDPF_VC_DEL_MAC_ADDR, - IDPF_VC_DEL_MAC_ADDR_ERR); - break; - case VIRTCHNL2_OP_EVENT: - idpf_recv_event_msg(vport, &ctlq_msg); - break; - default: - dev_warn(&adapter->pdev->dev, - "Unhandled virtchnl response %d\n", - ctlq_msg.cookie.mbx.chnl_opcode); - break; - } + retval = idpf_send_mb_msg(adapter, params->vc_op, + send_buf->iov_len, send_buf->iov_base, + cookie); + if (retval) { + idpf_vc_xn_lock(xn); + goto release_and_unlock; + } -post_buffs: - if (ctlq_msg.data_len) - dma_mem = ctlq_msg.ctx.indirect.payload; - else - num_q_msg = 0; + if (params->async) + return 0; - err = idpf_ctlq_post_rx_buffs(&adapter->hw, adapter->hw.arq, - &num_q_msg, &dma_mem); - /* If post failed clear the only buffer we supplied */ - if (err && dma_mem) - dma_free_coherent(&adapter->pdev->dev, dma_mem->size, - dma_mem->va, dma_mem->pa); + wait_for_completion_timeout(&xn->completed, + msecs_to_jiffies(params->timeout_ms)); - /* Applies only if we are looking for a specific opcode */ - if (work_done) - break; + /* No need to check the return value; we check the final state of the + * transaction below. It's possible the transaction actually gets more + * timeout than specified if we get preempted here but after + * wait_for_completion_timeout returns. This should be non-issue + * however. + */ + idpf_vc_xn_lock(xn); + switch (xn->state) { + case IDPF_VC_XN_SHUTDOWN: + retval = -ENXIO; + goto only_unlock; + case IDPF_VC_XN_WAITING: + dev_notice_ratelimited(&adapter->pdev->dev, "Transaction timed-out (op %d, %dms)\n", + params->vc_op, params->timeout_ms); + retval = -ETIME; + break; + case IDPF_VC_XN_COMPLETED_SUCCESS: + retval = xn->reply_sz; + break; + case IDPF_VC_XN_COMPLETED_FAILED: + dev_notice_ratelimited(&adapter->pdev->dev, "Transaction failed (op %d)\n", + params->vc_op); + retval = -EIO; + break; + default: + /* Invalid state. */ + WARN_ON_ONCE(1); + retval = -EIO; + break; } - return err; +release_and_unlock: + idpf_vc_xn_push_free(adapter->vcxn_mngr, xn); + /* If we receive a VC reply after here, it will be dropped. */ +only_unlock: + idpf_vc_xn_unlock(xn); + + return retval; } /** - * __idpf_wait_for_event - wrapper function for wait on virtchannel response - * @adapter: Driver private data structure - * @vport: virtual port structure - * @state: check on state upon timeout - * @err_check: check if this specific error bit is set - * @timeout: Max time to wait + * idpf_vc_xn_forward_async - Handle async reply receives + * @adapter: private data struct + * @xn: transaction to handle + * @ctlq_msg: corresponding ctlq_msg * - * Checks if state is set upon expiry of timeout. Returns 0 on success, - * negative on failure. 
+ * For async sends we're going to lose the caller's context so, if an + * async_handler was provided, it can deal with the reply, otherwise we'll just + * check and report if there is an error. */ -static int __idpf_wait_for_event(struct idpf_adapter *adapter, - struct idpf_vport *vport, - enum idpf_vport_vc_state state, - enum idpf_vport_vc_state err_check, - int timeout) +static int +idpf_vc_xn_forward_async(struct idpf_adapter *adapter, struct idpf_vc_xn *xn, + const struct idpf_ctlq_msg *ctlq_msg) { - int time_to_wait, num_waits; - wait_queue_head_t *vchnl_wq; - unsigned long *vc_state; + int err = 0; - time_to_wait = ((timeout <= IDPF_MAX_WAIT) ? timeout : IDPF_MAX_WAIT); - num_waits = ((timeout <= IDPF_MAX_WAIT) ? 1 : timeout / IDPF_MAX_WAIT); + if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) { + dev_err_ratelimited(&adapter->pdev->dev, "Async message opcode does not match transaction opcode (msg: %d) (xn: %d)\n", + ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op); + xn->reply_sz = 0; + err = -EINVAL; + goto release_bufs; + } - if (vport) { - vchnl_wq = &vport->vchnl_wq; - vc_state = vport->vc_state; - } else { - vchnl_wq = &adapter->vchnl_wq; - vc_state = adapter->vc_state; + if (xn->async_handler) { + err = xn->async_handler(adapter, xn, ctlq_msg); + goto release_bufs; + } + + if (ctlq_msg->cookie.mbx.chnl_retval) { + xn->reply_sz = 0; + dev_err_ratelimited(&adapter->pdev->dev, "Async message failure (op %d)\n", + ctlq_msg->cookie.mbx.chnl_opcode); + err = -EINVAL; } - while (num_waits) { - int event; +release_bufs: + idpf_vc_xn_push_free(adapter->vcxn_mngr, xn); + + return err; +} + +/** + * idpf_vc_xn_forward_reply - copy a reply back to receiving thread + * @adapter: driver specific private structure with vcxn_mngr + * @ctlq_msg: controlq message to send back to receiving thread + */ +static int +idpf_vc_xn_forward_reply(struct idpf_adapter *adapter, + const struct idpf_ctlq_msg *ctlq_msg) +{ + const void *payload = NULL; + size_t payload_size = 0; + struct idpf_vc_xn *xn; + u16 msg_info; + int err = 0; + u16 xn_idx; + u16 salt; + + msg_info = ctlq_msg->ctx.sw_cookie.data; + xn_idx = FIELD_GET(IDPF_VC_XN_IDX_M, msg_info); + if (xn_idx >= ARRAY_SIZE(adapter->vcxn_mngr->ring)) { + dev_err_ratelimited(&adapter->pdev->dev, "Out of bounds cookie received: %02x\n", + xn_idx); + return -EINVAL; + } + xn = &adapter->vcxn_mngr->ring[xn_idx]; + salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info); + if (xn->salt != salt) { + dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (%02x != %02x)\n", + xn->salt, salt); + return -EINVAL; + } - /* If we are here and a reset is detected do not wait but - * return. Reset timing is out of drivers control. So - * while we are cleaning resources as part of reset if the - * underlying HW mailbox is gone, wait on mailbox messages - * is not meaningful + idpf_vc_xn_lock(xn); + switch (xn->state) { + case IDPF_VC_XN_WAITING: + /* success */ + break; + case IDPF_VC_XN_IDLE: + dev_err_ratelimited(&adapter->pdev->dev, "Unexpected or belated VC reply (op %d)\n", + ctlq_msg->cookie.mbx.chnl_opcode); + err = -EINVAL; + goto out_unlock; + case IDPF_VC_XN_SHUTDOWN: + /* ENXIO is a bit special here as the recv msg loop uses that + * know if it should stop trying to clean the ring if we lost + * the virtchnl. We need to stop playing with registers and + * yield. 
*/ - if (idpf_is_reset_detected(adapter)) - return 0; + err = -ENXIO; + goto out_unlock; + case IDPF_VC_XN_ASYNC: + err = idpf_vc_xn_forward_async(adapter, xn, ctlq_msg); + idpf_vc_xn_unlock(xn); + return err; + default: + dev_err_ratelimited(&adapter->pdev->dev, "Overwriting VC reply (op %d)\n", + ctlq_msg->cookie.mbx.chnl_opcode); + err = -EBUSY; + goto out_unlock; + } - event = wait_event_timeout(*vchnl_wq, - test_and_clear_bit(state, vc_state), - msecs_to_jiffies(time_to_wait)); - if (event) { - if (test_and_clear_bit(err_check, vc_state)) { - dev_err(&adapter->pdev->dev, "VC response error %s\n", - idpf_vport_vc_state_str[err_check]); + if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) { + dev_err_ratelimited(&adapter->pdev->dev, "Message opcode does not match transaction opcode (msg: %d) (xn: %d)\n", + ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op); + xn->reply_sz = 0; + xn->state = IDPF_VC_XN_COMPLETED_FAILED; + err = -EINVAL; + goto out_unlock; + } - return -EINVAL; - } + if (ctlq_msg->cookie.mbx.chnl_retval) { + xn->reply_sz = 0; + xn->state = IDPF_VC_XN_COMPLETED_FAILED; + err = -EINVAL; + goto out_unlock; + } - return 0; - } - num_waits--; + if (ctlq_msg->data_len) { + payload = ctlq_msg->ctx.indirect.payload->va; + payload_size = ctlq_msg->ctx.indirect.payload->size; } - /* Timeout occurred */ - dev_err(&adapter->pdev->dev, "VC timeout, state = %s\n", - idpf_vport_vc_state_str[state]); + xn->reply_sz = payload_size; + xn->state = IDPF_VC_XN_COMPLETED_SUCCESS; - return -ETIMEDOUT; + if (xn->reply.iov_base && xn->reply.iov_len && payload_size) + memcpy(xn->reply.iov_base, payload, + min_t(size_t, xn->reply.iov_len, payload_size)); + +out_unlock: + idpf_vc_xn_unlock(xn); + /* we _cannot_ hold lock while calling complete */ + complete(&xn->completed); + + return err; } /** - * idpf_min_wait_for_event - wait for virtchannel response - * @adapter: Driver private data structure - * @vport: virtual port structure - * @state: check on state upon timeout - * @err_check: check if this specific error bit is set + * idpf_recv_mb_msg - Receive message over mailbox + * @adapter: Driver specific private structure * - * Returns 0 on success, negative on failure. + * Will receive control queue message and posts the receive buffer. Returns 0 + * on success and negative on failure. */ -static int idpf_min_wait_for_event(struct idpf_adapter *adapter, - struct idpf_vport *vport, - enum idpf_vport_vc_state state, - enum idpf_vport_vc_state err_check) +int idpf_recv_mb_msg(struct idpf_adapter *adapter) { - return __idpf_wait_for_event(adapter, vport, state, err_check, - IDPF_WAIT_FOR_EVENT_TIMEO_MIN); -} + struct idpf_ctlq_msg ctlq_msg; + struct idpf_dma_mem *dma_mem; + int post_err, err; + u16 num_recv; -/** - * idpf_wait_for_event - wait for virtchannel response - * @adapter: Driver private data structure - * @vport: virtual port structure - * @state: check on state upon timeout after 500ms - * @err_check: check if this specific error bit is set - * - * Returns 0 on success, negative on failure. - */ -static int idpf_wait_for_event(struct idpf_adapter *adapter, - struct idpf_vport *vport, - enum idpf_vport_vc_state state, - enum idpf_vport_vc_state err_check) -{ - /* Increasing the timeout in __IDPF_INIT_SW flow to consider large - * number of VF's mailbox message responses. When a message is received - * on mailbox, this thread is woken up by the idpf_recv_mb_msg before - * the timeout expires. Only in the error case i.e. 
if no message is - * received on mailbox, we wait for the complete timeout which is - * less likely to happen. - */ - return __idpf_wait_for_event(adapter, vport, state, err_check, - IDPF_WAIT_FOR_EVENT_TIMEO); + while (1) { + /* This will get <= num_recv messages and output how many + * actually received on num_recv. + */ + num_recv = 1; + err = idpf_ctlq_recv(adapter->hw.arq, &num_recv, &ctlq_msg); + if (err || !num_recv) + break; + + if (ctlq_msg.data_len) { + dma_mem = ctlq_msg.ctx.indirect.payload; + } else { + dma_mem = NULL; + num_recv = 0; + } + + if (ctlq_msg.cookie.mbx.chnl_opcode == VIRTCHNL2_OP_EVENT) + idpf_recv_event_msg(adapter, &ctlq_msg); + else + err = idpf_vc_xn_forward_reply(adapter, &ctlq_msg); + + post_err = idpf_ctlq_post_rx_buffs(&adapter->hw, + adapter->hw.arq, + &num_recv, &dma_mem); + + /* If post failed clear the only buffer we supplied */ + if (post_err) { + if (dma_mem) + dmam_free_coherent(&adapter->pdev->dev, + dma_mem->size, dma_mem->va, + dma_mem->pa); + break; + } + + /* virtchnl trying to shutdown, stop cleaning */ + if (err == -ENXIO) + break; + } + + return err; } /** @@ -785,7 +776,11 @@ static int idpf_wait_for_marker_event(struct idpf_vport *vport) */ static int idpf_send_ver_msg(struct idpf_adapter *adapter) { + struct idpf_vc_xn_params xn_params = {}; struct virtchnl2_version_info vvi; + ssize_t reply_sz; + u32 major, minor; + int err = 0; if (adapter->virt_ver_maj) { vvi.major = cpu_to_le32(adapter->virt_ver_maj); @@ -795,43 +790,29 @@ static int idpf_send_ver_msg(struct idpf_adapter *adapter) vvi.minor = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MINOR); } - return idpf_send_mb_msg(adapter, VIRTCHNL2_OP_VERSION, sizeof(vvi), - (u8 *)&vvi); -} - -/** - * idpf_recv_ver_msg - Receive virtchnl version message - * @adapter: Driver specific private structure - * - * Receive virtchnl version message. Returns 0 on success, -EAGAIN if we need - * to send version message again, otherwise negative on failure. - */ -static int idpf_recv_ver_msg(struct idpf_adapter *adapter) -{ - struct virtchnl2_version_info vvi; - u32 major, minor; - int err; + xn_params.vc_op = VIRTCHNL2_OP_VERSION; + xn_params.send_buf.iov_base = &vvi; + xn_params.send_buf.iov_len = sizeof(vvi); + xn_params.recv_buf = xn_params.send_buf; + xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; - err = idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_VERSION, &vvi, - sizeof(vvi)); - if (err) - return err; + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; + if (reply_sz < sizeof(vvi)) + return -EIO; major = le32_to_cpu(vvi.major); minor = le32_to_cpu(vvi.minor); if (major > IDPF_VIRTCHNL_VERSION_MAJOR) { - dev_warn(&adapter->pdev->dev, - "Virtchnl major version (%d) greater than supported\n", - major); - + dev_warn(&adapter->pdev->dev, "Virtchnl major version greater than supported\n"); return -EINVAL; } if (major == IDPF_VIRTCHNL_VERSION_MAJOR && minor > IDPF_VIRTCHNL_VERSION_MINOR) - dev_warn(&adapter->pdev->dev, - "Virtchnl minor version (%d) didn't match\n", minor); + dev_warn(&adapter->pdev->dev, "Virtchnl minor version didn't match\n"); /* If we have a mismatch, resend version to update receiver on what * version we will use. 
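All the converted senders in this patch share the same shape: fill a struct idpf_vc_xn_params with the opcode, a kvec for the send buffer, optionally a kvec for the reply, and a timeout, then let idpf_vc_xn_exec() do the send, wait and reply copy. A condensed sketch of a command whose reply payload is ignored (the idpf_send_simple_msg() name is illustrative only, it is not added by this patch):

/* Hypothetical helper for illustration; not part of this patch. */
static int idpf_send_simple_msg(struct idpf_adapter *adapter, u32 op,
				void *buf, size_t len)
{
	struct idpf_vc_xn_params xn_params = {};
	ssize_t reply_sz;

	xn_params.vc_op = op;
	xn_params.send_buf.iov_base = buf;
	xn_params.send_buf.iov_len = len;
	/* recv_buf left zeroed: any reply payload is ignored */
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);

	return reply_sz < 0 ? reply_sz : 0;
}

Commands that do consume the reply (VIRTCHNL2_OP_GET_CAPS, VIRTCHNL2_OP_CREATE_VPORT) additionally point recv_buf at a destination buffer and treat a short reply as -EIO, as the hunks below do.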
@@ -856,7 +837,9 @@ static int idpf_recv_ver_msg(struct idpf_adapter *adapter) */ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter) { - struct virtchnl2_get_capabilities caps = { }; + struct virtchnl2_get_capabilities caps = {}; + struct idpf_vc_xn_params xn_params = {}; + ssize_t reply_sz; caps.csum_caps = cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 | @@ -913,21 +896,20 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter) VIRTCHNL2_CAP_PROMISC | VIRTCHNL2_CAP_LOOPBACK); - return idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_CAPS, sizeof(caps), - (u8 *)&caps); -} + xn_params.vc_op = VIRTCHNL2_OP_GET_CAPS; + xn_params.send_buf.iov_base = &caps; + xn_params.send_buf.iov_len = sizeof(caps); + xn_params.recv_buf.iov_base = &adapter->caps; + xn_params.recv_buf.iov_len = sizeof(adapter->caps); + xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; -/** - * idpf_recv_get_caps_msg - Receive virtchnl get capabilities message - * @adapter: Driver specific private structure - * - * Receive virtchnl get capabilities message. Returns 0 on success, negative on - * failure. - */ -static int idpf_recv_get_caps_msg(struct idpf_adapter *adapter) -{ - return idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_GET_CAPS, &adapter->caps, - sizeof(struct virtchnl2_get_capabilities)); + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; + if (reply_sz < sizeof(adapter->caps)) + return -EIO; + + return 0; } /** @@ -1254,8 +1236,10 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter, struct idpf_vport_max_q *max_q) { struct virtchnl2_create_vport *vport_msg; + struct idpf_vc_xn_params xn_params = {}; u16 idx = adapter->next_vport; int err, buf_size; + ssize_t reply_sz; buf_size = sizeof(struct virtchnl2_create_vport); if (!adapter->vport_params_reqd[idx]) { @@ -1286,35 +1270,38 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter, return err; } - mutex_lock(&adapter->vc_buf_lock); - - err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_CREATE_VPORT, buf_size, - (u8 *)vport_msg); - if (err) - goto rel_lock; - - err = idpf_wait_for_event(adapter, NULL, IDPF_VC_CREATE_VPORT, - IDPF_VC_CREATE_VPORT_ERR); - if (err) { - dev_err(&adapter->pdev->dev, "Failed to receive create vport message"); - - goto rel_lock; - } - if (!adapter->vport_params_recvd[idx]) { adapter->vport_params_recvd[idx] = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); if (!adapter->vport_params_recvd[idx]) { err = -ENOMEM; - goto rel_lock; + goto free_vport_params; } } - vport_msg = adapter->vport_params_recvd[idx]; - memcpy(vport_msg, adapter->vc_msg, IDPF_CTLQ_MAX_BUF_LEN); + xn_params.vc_op = VIRTCHNL2_OP_CREATE_VPORT; + xn_params.send_buf.iov_base = vport_msg; + xn_params.send_buf.iov_len = buf_size; + xn_params.recv_buf.iov_base = adapter->vport_params_recvd[idx]; + xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; + xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); + if (reply_sz < 0) { + err = reply_sz; + goto free_vport_params; + } + if (reply_sz < IDPF_CTLQ_MAX_BUF_LEN) { + err = -EIO; + goto free_vport_params; + } -rel_lock: - mutex_unlock(&adapter->vc_buf_lock); + return 0; + +free_vport_params: + kfree(adapter->vport_params_recvd[idx]); + adapter->vport_params_recvd[idx] = NULL; + kfree(adapter->vport_params_reqd[idx]); + adapter->vport_params_reqd[idx] = NULL; return err; } @@ -1366,26 +1353,19 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport) */ int idpf_send_destroy_vport_msg(struct idpf_vport *vport) { - 
struct idpf_adapter *adapter = vport->adapter; + struct idpf_vc_xn_params xn_params = {}; struct virtchnl2_vport v_id; - int err; + ssize_t reply_sz; v_id.vport_id = cpu_to_le32(vport->vport_id); - mutex_lock(&vport->vc_buf_lock); + xn_params.vc_op = VIRTCHNL2_OP_DESTROY_VPORT; + xn_params.send_buf.iov_base = &v_id; + xn_params.send_buf.iov_len = sizeof(v_id); + xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC; + reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); - err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DESTROY_VPORT, - sizeof(v_id), (u8 *)&v_id); - if (err) - goto rel_lock; - - err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DESTROY_VPORT, - IDPF_VC_DESTROY_VPORT_ERR); - -rel_lock: - mutex_unlock(&vport->vc_buf_lock); - - return err; + return reply_sz < 0 ? reply_sz : 0; } /** @@ -1397,26 +1377,19 @@ rel_lock: */ int idpf_send_enable_vport_msg(struct idpf_vport *vport) { - struct idpf_adapter *adapter = vport->adapter; + struct idpf_vc_xn_params xn_params = {}; struct virtchnl2_vport v_id; - int err; + ssize_t reply_sz; v_id.vport_id = cpu_to_le32(vport->vport_id); - mutex_lock(&vport->vc_buf_lock); - - err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ENABLE_VPORT, - sizeof(v_id), (u8 *)&v_id); - if (err) - goto rel_lock; - - err = idpf_wait_for_event(adapter, vport, IDPF_VC_ENA_VPORT, - IDPF_VC_ENA_VPORT_ERR); + xn_params.vc_op = VIRTCHNL2_OP_ENABLE_VPORT; + xn_params.send_buf.iov_base = &v_id; + xn_params.send_buf.iov_len = sizeof(v_id); + xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; + reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); -rel_lock: - mutex_unlock(&vport->vc_buf_lock); - - return err; + return reply_sz < 0 ? reply_sz : 0; } /** @@ -1428,26 +1401,19 @@ rel_lock: */ int idpf_send_disable_vport_msg(struct idpf_vport *vport) { - struct idpf_adapter *adapter = vport->adapter; + struct idpf_vc_xn_params xn_params = {}; struct virtchnl2_vport v_id; - int err; + ssize_t reply_sz; v_id.vport_id = cpu_to_le32(vport->vport_id); - mutex_lock(&vport->vc_buf_lock); + xn_params.vc_op = VIRTCHNL2_OP_DISABLE_VPORT; + xn_params.send_buf.iov_base = &v_id; + xn_params.send_buf.iov_len = sizeof(v_id); + xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC; + reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); - err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DISABLE_VPORT, - sizeof(v_id), (u8 *)&v_id); - if (err) - goto rel_lock; - - err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DIS_VPORT, - IDPF_VC_DIS_VPORT_ERR); - -rel_lock: - mutex_unlock(&vport->vc_buf_lock); - - return err; + return reply_sz < 0 ? 
reply_sz : 0; } /** @@ -1459,11 +1425,13 @@ rel_lock: */ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport) { - struct virtchnl2_config_tx_queues *ctq; + struct virtchnl2_config_tx_queues *ctq __free(kfree) = NULL; + struct virtchnl2_txq_info *qi __free(kfree) = NULL; + struct idpf_vc_xn_params xn_params = {}; u32 config_sz, chunk_sz, buf_sz; int totqs, num_msgs, num_chunks; - struct virtchnl2_txq_info *qi; - int err = 0, i, k = 0; + ssize_t reply_sz; + int i, k = 0; totqs = vport->num_txq + vport->num_complq; qi = kcalloc(totqs, sizeof(struct virtchnl2_txq_info), GFP_KERNEL); @@ -1524,10 +1492,8 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport) } /* Make sure accounting agrees */ - if (k != totqs) { - err = -EINVAL; - goto error; - } + if (k != totqs) + return -EINVAL; /* Chunk up the queue contexts into multiple messages to avoid * sending a control queue message buffer that is too large @@ -1541,12 +1507,11 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport) buf_sz = struct_size(ctq, qinfo, num_chunks); ctq = kzalloc(buf_sz, GFP_KERNEL); - if (!ctq) { - err = -ENOMEM; - goto error; - } + if (!ctq) + return -ENOMEM; - mutex_lock(&vport->vc_buf_lock); + xn_params.vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES; + xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; for (i = 0, k = 0; i < num_msgs; i++) { memset(ctq, 0, buf_sz); @@ -1554,17 +1519,11 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport) ctq->num_qinfo = cpu_to_le16(num_chunks); memcpy(ctq->qinfo, &qi[k], chunk_sz * num_chunks); - err = idpf_send_mb_msg(vport->adapter, - VIRTCHNL2_OP_CONFIG_TX_QUEUES, - buf_sz, (u8 *)ctq); - if (err) - goto mbx_error; - - err = idpf_wait_for_event(vport->adapter, vport, - IDPF_VC_CONFIG_TXQ, - IDPF_VC_CONFIG_TXQ_ERR); - if (err) - goto mbx_error; + xn_params.send_buf.iov_base = ctq; + xn_params.send_buf.iov_len = buf_sz; + reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; k += num_chunks; totqs -= num_chunks; @@ -1573,13 +1532,7 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport) buf_sz = struct_size(ctq, qinfo, num_chunks); } -mbx_error: - mutex_unlock(&vport->vc_buf_lock); - kfree(ctq); -error: - kfree(qi); - - return err; + return 0; } /** @@ -1591,11 +1544,13 @@ error: */ static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport) { - struct virtchnl2_config_rx_queues *crq; + struct virtchnl2_config_rx_queues *crq __free(kfree) = NULL; + struct virtchnl2_rxq_info *qi __free(kfree) = NULL; + struct idpf_vc_xn_params xn_params = {}; u32 config_sz, chunk_sz, buf_sz; int totqs, num_msgs, num_chunks; - struct virtchnl2_rxq_info *qi; - int err = 0, i, k = 0; + ssize_t reply_sz; + int i, k = 0; totqs = vport->num_rxq + vport->num_bufq; qi = kcalloc(totqs, sizeof(struct virtchnl2_rxq_info), GFP_KERNEL); @@ -1676,10 +1631,8 @@ common_qi_fields: } /* Make sure accounting agrees */ - if (k != totqs) { - err = -EINVAL; - goto error; - } + if (k != totqs) + return -EINVAL; /* Chunk up the queue contexts into multiple messages to avoid * sending a control queue message buffer that is too large @@ -1693,12 +1646,11 @@ common_qi_fields: buf_sz = struct_size(crq, qinfo, num_chunks); crq = kzalloc(buf_sz, GFP_KERNEL); - if (!crq) { - err = -ENOMEM; - goto error; - } + if (!crq) + return -ENOMEM; - mutex_lock(&vport->vc_buf_lock); + xn_params.vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES; + xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; for (i = 0, k = 0; i < num_msgs; 
i++) { memset(crq, 0, buf_sz); @@ -1706,17 +1658,11 @@ common_qi_fields: crq->num_qinfo = cpu_to_le16(num_chunks); memcpy(crq->qinfo, &qi[k], chunk_sz * num_chunks); - err = idpf_send_mb_msg(vport->adapter, - VIRTCHNL2_OP_CONFIG_RX_QUEUES, - buf_sz, (u8 *)crq); - if (err) - goto mbx_error; - - err = idpf_wait_for_event(vport->adapter, vport, - IDPF_VC_CONFIG_RXQ, - IDPF_VC_CONFIG_RXQ_ERR); - if (err) - goto mbx_error; + xn_params.send_buf.iov_base = crq; + xn_params.send_buf.iov_len = buf_sz; + reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; k += num_chunks; totqs -= num_chunks; @@ -1725,42 +1671,28 @@ common_qi_fields: buf_sz = struct_size(crq, qinfo, num_chunks); } -mbx_error: - mutex_unlock(&vport->vc_buf_lock); - kfree(crq); -error: - kfree(qi); - - return err; + return 0; } /** * idpf_send_ena_dis_queues_msg - Send virtchnl enable or disable * queues message * @vport: virtual port data structure - * @vc_op: virtchnl op code to send + * @ena: if true enable, false disable * * Send enable or disable queues virtchnl message. Returns 0 on success, * negative on failure. */ -static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op) +static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena) { + struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL; + struct virtchnl2_queue_chunk *qc __free(kfree) = NULL; u32 num_msgs, num_chunks, num_txq, num_rxq, num_q; - struct idpf_adapter *adapter = vport->adapter; - struct virtchnl2_del_ena_dis_queues *eq; + struct idpf_vc_xn_params xn_params = {}; struct virtchnl2_queue_chunks *qcs; - struct virtchnl2_queue_chunk *qc; u32 config_sz, chunk_sz, buf_sz; - int i, j, k = 0, err = 0; - - /* validate virtchnl op */ - switch (vc_op) { - case VIRTCHNL2_OP_ENABLE_QUEUES: - case VIRTCHNL2_OP_DISABLE_QUEUES: - break; - default: - return -EINVAL; - } + ssize_t reply_sz; + int i, j, k = 0; num_txq = vport->num_txq + vport->num_complq; num_rxq = vport->num_rxq + vport->num_bufq; @@ -1779,10 +1711,8 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op) qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); } } - if (vport->num_txq != k) { - err = -EINVAL; - goto error; - } + if (vport->num_txq != k) + return -EINVAL; if (!idpf_is_queue_model_split(vport->txq_model)) goto setup_rx; @@ -1794,10 +1724,8 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op) qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id); qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); } - if (vport->num_complq != (k - vport->num_txq)) { - err = -EINVAL; - goto error; - } + if (vport->num_complq != (k - vport->num_txq)) + return -EINVAL; setup_rx: for (i = 0; i < vport->num_rxq_grp; i++) { @@ -1823,10 +1751,8 @@ setup_rx: qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); } } - if (vport->num_rxq != k - (vport->num_txq + vport->num_complq)) { - err = -EINVAL; - goto error; - } + if (vport->num_rxq != k - (vport->num_txq + vport->num_complq)) + return -EINVAL; if (!idpf_is_queue_model_split(vport->rxq_model)) goto send_msg; @@ -1845,10 +1771,8 @@ setup_rx: } if (vport->num_bufq != k - (vport->num_txq + vport->num_complq + - vport->num_rxq)) { - err = -EINVAL; - goto error; - } + vport->num_rxq)) + return -EINVAL; send_msg: /* Chunk up the queue info into multiple messages */ @@ -1861,12 +1785,16 @@ send_msg: buf_sz = struct_size(eq, chunks.chunks, num_chunks); eq = kzalloc(buf_sz, GFP_KERNEL); - if (!eq) { - err = -ENOMEM; - goto error; - } + if 
(!eq) + return -ENOMEM; - mutex_lock(&vport->vc_buf_lock); + if (ena) { + xn_params.vc_op = VIRTCHNL2_OP_ENABLE_QUEUES; + xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; + } else { + xn_params.vc_op = VIRTCHNL2_OP_DISABLE_QUEUES; + xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC; + } for (i = 0, k = 0; i < num_msgs; i++) { memset(eq, 0, buf_sz); @@ -1875,20 +1803,11 @@ send_msg: qcs = &eq->chunks; memcpy(qcs->chunks, &qc[k], chunk_sz * num_chunks); - err = idpf_send_mb_msg(adapter, vc_op, buf_sz, (u8 *)eq); - if (err) - goto mbx_error; - - if (vc_op == VIRTCHNL2_OP_ENABLE_QUEUES) - err = idpf_wait_for_event(adapter, vport, - IDPF_VC_ENA_QUEUES, - IDPF_VC_ENA_QUEUES_ERR); - else - err = idpf_min_wait_for_event(adapter, vport, - IDPF_VC_DIS_QUEUES, - IDPF_VC_DIS_QUEUES_ERR); - if (err) - goto mbx_error; + xn_params.send_buf.iov_base = eq; + xn_params.send_buf.iov_len = buf_sz; + reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; k += num_chunks; num_q -= num_chunks; @@ -1897,13 +1816,7 @@ send_msg: buf_sz = struct_size(eq, chunks.chunks, num_chunks); } -mbx_error: - mutex_unlock(&vport->vc_buf_lock); - kfree(eq); -error: - kfree(qc); - - return err; + return 0; } /** @@ -1917,12 +1830,13 @@ error: */ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map) { - struct idpf_adapter *adapter = vport->adapter; - struct virtchnl2_queue_vector_maps *vqvm; - struct virtchnl2_queue_vector *vqv; + struct virtchnl2_queue_vector_maps *vqvm __free(kfree) = NULL; + struct virtchnl2_queue_vector *vqv __free(kfree) = NULL; + struct idpf_vc_xn_params xn_params = {}; u32 config_sz, chunk_sz, buf_sz; u32 num_msgs, num_chunks, num_q; - int i, j, k = 0, err = 0; + ssize_t reply_sz; + int i, j, k = 0; num_q = vport->num_txq + vport->num_rxq; @@ -1952,10 +1866,8 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map) } } - if (vport->num_txq != k) { - err = -EINVAL; - goto error; - } + if (vport->num_txq != k) + return -EINVAL; for (i = 0; i < vport->num_rxq_grp; i++) { struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; @@ -1982,15 +1894,11 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map) } if (idpf_is_queue_model_split(vport->txq_model)) { - if (vport->num_rxq != k - vport->num_complq) { - err = -EINVAL; - goto error; - } + if (vport->num_rxq != k - vport->num_complq) + return -EINVAL; } else { - if (vport->num_rxq != k - vport->num_txq) { - err = -EINVAL; - goto error; - } + if (vport->num_rxq != k - vport->num_txq) + return -EINVAL; } /* Chunk up the vector info into multiple messages */ @@ -2003,39 +1911,28 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map) buf_sz = struct_size(vqvm, qv_maps, num_chunks); vqvm = kzalloc(buf_sz, GFP_KERNEL); - if (!vqvm) { - err = -ENOMEM; - goto error; - } + if (!vqvm) + return -ENOMEM; - mutex_lock(&vport->vc_buf_lock); + if (map) { + xn_params.vc_op = VIRTCHNL2_OP_MAP_QUEUE_VECTOR; + xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; + } else { + xn_params.vc_op = VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR; + xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC; + } for (i = 0, k = 0; i < num_msgs; i++) { memset(vqvm, 0, buf_sz); + xn_params.send_buf.iov_base = vqvm; + xn_params.send_buf.iov_len = buf_sz; vqvm->vport_id = cpu_to_le32(vport->vport_id); vqvm->num_qv_maps = cpu_to_le16(num_chunks); memcpy(vqvm->qv_maps, &vqv[k], chunk_sz * num_chunks); - if (map) { - err = idpf_send_mb_msg(adapter, - 
VIRTCHNL2_OP_MAP_QUEUE_VECTOR, - buf_sz, (u8 *)vqvm); - if (!err) - err = idpf_wait_for_event(adapter, vport, - IDPF_VC_MAP_IRQ, - IDPF_VC_MAP_IRQ_ERR); - } else { - err = idpf_send_mb_msg(adapter, - VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR, - buf_sz, (u8 *)vqvm); - if (!err) - err = - idpf_min_wait_for_event(adapter, vport, - IDPF_VC_UNMAP_IRQ, - IDPF_VC_UNMAP_IRQ_ERR); - } - if (err) - goto mbx_error; + reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; k += num_chunks; num_q -= num_chunks; @@ -2044,13 +1941,7 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map) buf_sz = struct_size(vqvm, qv_maps, num_chunks); } -mbx_error: - mutex_unlock(&vport->vc_buf_lock); - kfree(vqvm); -error: - kfree(vqv); - - return err; + return 0; } /** @@ -2062,7 +1953,7 @@ error: */ int idpf_send_enable_queues_msg(struct idpf_vport *vport) { - return idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_ENABLE_QUEUES); + return idpf_send_ena_dis_queues_msg(vport, true); } /** @@ -2076,7 +1967,7 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport) { int err, i; - err = idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_DISABLE_QUEUES); + err = idpf_send_ena_dis_queues_msg(vport, false); if (err) return err; @@ -2087,8 +1978,10 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport) set_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags); /* schedule the napi to receive all the marker packets */ + local_bh_disable(); for (i = 0; i < vport->num_q_vectors; i++) napi_schedule(&vport->q_vectors[i].napi); + local_bh_enable(); return idpf_wait_for_marker_event(vport); } @@ -2122,22 +2015,21 @@ static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchun */ int idpf_send_delete_queues_msg(struct idpf_vport *vport) { - struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL; struct virtchnl2_create_vport *vport_params; struct virtchnl2_queue_reg_chunks *chunks; - struct virtchnl2_del_ena_dis_queues *eq; + struct idpf_vc_xn_params xn_params = {}; struct idpf_vport_config *vport_config; u16 vport_idx = vport->idx; - int buf_size, err; + ssize_t reply_sz; u16 num_chunks; + int buf_size; - vport_config = adapter->vport_config[vport_idx]; + vport_config = vport->adapter->vport_config[vport_idx]; if (vport_config->req_qs_chunks) { - struct virtchnl2_add_queues *vc_aq = - (struct virtchnl2_add_queues *)vport_config->req_qs_chunks; - chunks = &vc_aq->chunks; + chunks = &vport_config->req_qs_chunks->chunks; } else { - vport_params = adapter->vport_params_recvd[vport_idx]; + vport_params = vport->adapter->vport_params_recvd[vport_idx]; chunks = &vport_params->chunks; } @@ -2154,21 +2046,13 @@ int idpf_send_delete_queues_msg(struct idpf_vport *vport) idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks, num_chunks); - mutex_lock(&vport->vc_buf_lock); - - err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DEL_QUEUES, - buf_size, (u8 *)eq); - if (err) - goto rel_lock; - - err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DEL_QUEUES, - IDPF_VC_DEL_QUEUES_ERR); - -rel_lock: - mutex_unlock(&vport->vc_buf_lock); - kfree(eq); + xn_params.vc_op = VIRTCHNL2_OP_DEL_QUEUES; + xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC; + xn_params.send_buf.iov_base = eq; + xn_params.send_buf.iov_len = buf_size; + reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); - return err; + return reply_sz < 0 ? 
reply_sz : 0; } /** @@ -2203,14 +2087,21 @@ int idpf_send_config_queues_msg(struct idpf_vport *vport) int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q, u16 num_complq, u16 num_rx_q, u16 num_rx_bufq) { - struct idpf_adapter *adapter = vport->adapter; + struct virtchnl2_add_queues *vc_msg __free(kfree) = NULL; + struct idpf_vc_xn_params xn_params = {}; struct idpf_vport_config *vport_config; - struct virtchnl2_add_queues aq = { }; - struct virtchnl2_add_queues *vc_msg; + struct virtchnl2_add_queues aq = {}; u16 vport_idx = vport->idx; - int size, err; + ssize_t reply_sz; + int size; + + vc_msg = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); + if (!vc_msg) + return -ENOMEM; - vport_config = adapter->vport_config[vport_idx]; + vport_config = vport->adapter->vport_config[vport_idx]; + kfree(vport_config->req_qs_chunks); + vport_config->req_qs_chunks = NULL; aq.vport_id = cpu_to_le32(vport->vport_id); aq.num_tx_q = cpu_to_le16(num_tx_q); @@ -2218,47 +2109,33 @@ int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q, aq.num_rx_q = cpu_to_le16(num_rx_q); aq.num_rx_bufq = cpu_to_le16(num_rx_bufq); - mutex_lock(&((struct idpf_vport *)vport)->vc_buf_lock); - - err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ADD_QUEUES, - sizeof(struct virtchnl2_add_queues), (u8 *)&aq); - if (err) - goto rel_lock; - - /* We want vport to be const to prevent incidental code changes making - * changes to the vport config. We're making a special exception here - * to discard const to use the virtchnl. - */ - err = idpf_wait_for_event(adapter, (struct idpf_vport *)vport, - IDPF_VC_ADD_QUEUES, IDPF_VC_ADD_QUEUES_ERR); - if (err) - goto rel_lock; - - kfree(vport_config->req_qs_chunks); - vport_config->req_qs_chunks = NULL; + xn_params.vc_op = VIRTCHNL2_OP_ADD_QUEUES; + xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; + xn_params.send_buf.iov_base = &aq; + xn_params.send_buf.iov_len = sizeof(aq); + xn_params.recv_buf.iov_base = vc_msg; + xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; + reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; - vc_msg = (struct virtchnl2_add_queues *)vport->vc_msg; /* compare vc_msg num queues with vport num queues */ if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q || le16_to_cpu(vc_msg->num_rx_q) != num_rx_q || le16_to_cpu(vc_msg->num_tx_complq) != num_complq || - le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq) { - err = -EINVAL; - goto rel_lock; - } + le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq) + return -EINVAL; size = struct_size(vc_msg, chunks.chunks, le16_to_cpu(vc_msg->chunks.num_chunks)); - vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL); - if (!vport_config->req_qs_chunks) { - err = -ENOMEM; - goto rel_lock; - } + if (reply_sz < size) + return -EIO; -rel_lock: - mutex_unlock(&((struct idpf_vport *)vport)->vc_buf_lock); + vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL); + if (!vport_config->req_qs_chunks) + return -ENOMEM; - return err; + return 0; } /** @@ -2270,53 +2147,49 @@ rel_lock: */ int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors) { - struct virtchnl2_alloc_vectors *alloc_vec, *rcvd_vec; - struct virtchnl2_alloc_vectors ac = { }; + struct virtchnl2_alloc_vectors *rcvd_vec __free(kfree) = NULL; + struct idpf_vc_xn_params xn_params = {}; + struct virtchnl2_alloc_vectors ac = {}; + ssize_t reply_sz; u16 num_vchunks; - int size, err; + int size; ac.num_vectors = cpu_to_le16(num_vectors); - mutex_lock(&adapter->vc_buf_lock); - - 
err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ALLOC_VECTORS, - sizeof(ac), (u8 *)&ac); - if (err) - goto rel_lock; + rcvd_vec = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); + if (!rcvd_vec) + return -ENOMEM; - err = idpf_wait_for_event(adapter, NULL, IDPF_VC_ALLOC_VECTORS, - IDPF_VC_ALLOC_VECTORS_ERR); - if (err) - goto rel_lock; + xn_params.vc_op = VIRTCHNL2_OP_ALLOC_VECTORS; + xn_params.send_buf.iov_base = &ac; + xn_params.send_buf.iov_len = sizeof(ac); + xn_params.recv_buf.iov_base = rcvd_vec; + xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; + xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; - rcvd_vec = (struct virtchnl2_alloc_vectors *)adapter->vc_msg; num_vchunks = le16_to_cpu(rcvd_vec->vchunks.num_vchunks); - size = struct_size(rcvd_vec, vchunks.vchunks, num_vchunks); - if (size > sizeof(adapter->vc_msg)) { - err = -EINVAL; - goto rel_lock; - } + if (reply_sz < size) + return -EIO; + + if (size > IDPF_CTLQ_MAX_BUF_LEN) + return -EINVAL; kfree(adapter->req_vec_chunks); - adapter->req_vec_chunks = NULL; - adapter->req_vec_chunks = kmemdup(adapter->vc_msg, size, GFP_KERNEL); - if (!adapter->req_vec_chunks) { - err = -ENOMEM; - goto rel_lock; - } + adapter->req_vec_chunks = kmemdup(rcvd_vec, size, GFP_KERNEL); + if (!adapter->req_vec_chunks) + return -ENOMEM; - alloc_vec = adapter->req_vec_chunks; - if (le16_to_cpu(alloc_vec->num_vectors) < num_vectors) { + if (le16_to_cpu(adapter->req_vec_chunks->num_vectors) < num_vectors) { kfree(adapter->req_vec_chunks); adapter->req_vec_chunks = NULL; - err = -EINVAL; + return -EINVAL; } -rel_lock: - mutex_unlock(&adapter->vc_buf_lock); - - return err; + return 0; } /** @@ -2329,29 +2202,24 @@ int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter) { struct virtchnl2_alloc_vectors *ac = adapter->req_vec_chunks; struct virtchnl2_vector_chunks *vcs = &ac->vchunks; - int buf_size, err; + struct idpf_vc_xn_params xn_params = {}; + ssize_t reply_sz; + int buf_size; buf_size = struct_size(vcs, vchunks, le16_to_cpu(vcs->num_vchunks)); - mutex_lock(&adapter->vc_buf_lock); - - err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DEALLOC_VECTORS, buf_size, - (u8 *)vcs); - if (err) - goto rel_lock; - - err = idpf_min_wait_for_event(adapter, NULL, IDPF_VC_DEALLOC_VECTORS, - IDPF_VC_DEALLOC_VECTORS_ERR); - if (err) - goto rel_lock; + xn_params.vc_op = VIRTCHNL2_OP_DEALLOC_VECTORS; + xn_params.send_buf.iov_base = vcs; + xn_params.send_buf.iov_len = buf_size; + xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC; + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; kfree(adapter->req_vec_chunks); adapter->req_vec_chunks = NULL; -rel_lock: - mutex_unlock(&adapter->vc_buf_lock); - - return err; + return 0; } /** @@ -2374,25 +2242,18 @@ static int idpf_get_max_vfs(struct idpf_adapter *adapter) */ int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs) { - struct virtchnl2_sriov_vfs_info svi = { }; - int err; + struct virtchnl2_sriov_vfs_info svi = {}; + struct idpf_vc_xn_params xn_params = {}; + ssize_t reply_sz; svi.num_vfs = cpu_to_le16(num_vfs); + xn_params.vc_op = VIRTCHNL2_OP_SET_SRIOV_VFS; + xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; + xn_params.send_buf.iov_base = &svi; + xn_params.send_buf.iov_len = sizeof(svi); + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); - mutex_lock(&adapter->vc_buf_lock); - - err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_SRIOV_VFS, - sizeof(svi), (u8 *)&svi); - if 
(err) - goto rel_lock; - - err = idpf_wait_for_event(adapter, NULL, IDPF_VC_SET_SRIOV_VFS, - IDPF_VC_SET_SRIOV_VFS_ERR); - -rel_lock: - mutex_unlock(&adapter->vc_buf_lock); - - return err; + return reply_sz < 0 ? reply_sz : 0; } /** @@ -2405,10 +2266,10 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport) { struct idpf_netdev_priv *np = netdev_priv(vport->netdev); struct rtnl_link_stats64 *netstats = &np->netstats; - struct idpf_adapter *adapter = vport->adapter; - struct virtchnl2_vport_stats stats_msg = { }; - struct virtchnl2_vport_stats *stats; - int err; + struct virtchnl2_vport_stats stats_msg = {}; + struct idpf_vc_xn_params xn_params = {}; + ssize_t reply_sz; + /* Don't send get_stats message if the link is down */ if (np->state <= __IDPF_VPORT_DOWN) @@ -2416,46 +2277,38 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport) stats_msg.vport_id = cpu_to_le32(vport->vport_id); - mutex_lock(&vport->vc_buf_lock); + xn_params.vc_op = VIRTCHNL2_OP_GET_STATS; + xn_params.send_buf.iov_base = &stats_msg; + xn_params.send_buf.iov_len = sizeof(stats_msg); + xn_params.recv_buf = xn_params.send_buf; + xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; - err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_STATS, - sizeof(struct virtchnl2_vport_stats), - (u8 *)&stats_msg); - if (err) - goto rel_lock; - - err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_STATS, - IDPF_VC_GET_STATS_ERR); - if (err) - goto rel_lock; - - stats = (struct virtchnl2_vport_stats *)vport->vc_msg; + reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; + if (reply_sz < sizeof(stats_msg)) + return -EIO; spin_lock_bh(&np->stats_lock); - netstats->rx_packets = le64_to_cpu(stats->rx_unicast) + - le64_to_cpu(stats->rx_multicast) + - le64_to_cpu(stats->rx_broadcast); - netstats->rx_bytes = le64_to_cpu(stats->rx_bytes); - netstats->rx_dropped = le64_to_cpu(stats->rx_discards); - netstats->rx_over_errors = le64_to_cpu(stats->rx_overflow_drop); - netstats->rx_length_errors = le64_to_cpu(stats->rx_invalid_frame_length); - - netstats->tx_packets = le64_to_cpu(stats->tx_unicast) + - le64_to_cpu(stats->tx_multicast) + - le64_to_cpu(stats->tx_broadcast); - netstats->tx_bytes = le64_to_cpu(stats->tx_bytes); - netstats->tx_errors = le64_to_cpu(stats->tx_errors); - netstats->tx_dropped = le64_to_cpu(stats->tx_discards); - - vport->port_stats.vport_stats = *stats; + netstats->rx_packets = le64_to_cpu(stats_msg.rx_unicast) + + le64_to_cpu(stats_msg.rx_multicast) + + le64_to_cpu(stats_msg.rx_broadcast); + netstats->tx_packets = le64_to_cpu(stats_msg.tx_unicast) + + le64_to_cpu(stats_msg.tx_multicast) + + le64_to_cpu(stats_msg.tx_broadcast); + netstats->rx_bytes = le64_to_cpu(stats_msg.rx_bytes); + netstats->tx_bytes = le64_to_cpu(stats_msg.tx_bytes); + netstats->rx_errors = le64_to_cpu(stats_msg.rx_errors); + netstats->tx_errors = le64_to_cpu(stats_msg.tx_errors); + netstats->rx_dropped = le64_to_cpu(stats_msg.rx_discards); + netstats->tx_dropped = le64_to_cpu(stats_msg.tx_discards); + + vport->port_stats.vport_stats = stats_msg; spin_unlock_bh(&np->stats_lock); -rel_lock: - mutex_unlock(&vport->vc_buf_lock); - - return err; + return 0; } /** @@ -2467,70 +2320,70 @@ rel_lock: */ int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get) { - struct idpf_adapter *adapter = vport->adapter; - struct virtchnl2_rss_lut *recv_rl; + struct virtchnl2_rss_lut *recv_rl __free(kfree) = NULL; + struct virtchnl2_rss_lut *rl __free(kfree) = NULL; + struct idpf_vc_xn_params xn_params = {}; 
struct idpf_rss_data *rss_data; - struct virtchnl2_rss_lut *rl; int buf_size, lut_buf_size; - int i, err; + ssize_t reply_sz; + int i; - rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; + rss_data = + &vport->adapter->vport_config[vport->idx]->user_config.rss_data; buf_size = struct_size(rl, lut, rss_data->rss_lut_size); rl = kzalloc(buf_size, GFP_KERNEL); if (!rl) return -ENOMEM; rl->vport_id = cpu_to_le32(vport->vport_id); - mutex_lock(&vport->vc_buf_lock); - if (!get) { + xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; + xn_params.send_buf.iov_base = rl; + xn_params.send_buf.iov_len = buf_size; + + if (get) { + recv_rl = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); + if (!recv_rl) + return -ENOMEM; + xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_LUT; + xn_params.recv_buf.iov_base = recv_rl; + xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; + } else { rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size); for (i = 0; i < rss_data->rss_lut_size; i++) rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]); - err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_RSS_LUT, - buf_size, (u8 *)rl); - if (err) - goto free_mem; - - err = idpf_wait_for_event(adapter, vport, IDPF_VC_SET_RSS_LUT, - IDPF_VC_SET_RSS_LUT_ERR); - - goto free_mem; + xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT; } + reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; + if (!get) + return 0; + if (reply_sz < sizeof(struct virtchnl2_rss_lut)) + return -EIO; - err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_RSS_LUT, - buf_size, (u8 *)rl); - if (err) - goto free_mem; + lut_buf_size = le16_to_cpu(recv_rl->lut_entries) * sizeof(u32); + if (reply_sz < lut_buf_size) + return -EIO; - err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_RSS_LUT, - IDPF_VC_GET_RSS_LUT_ERR); - if (err) - goto free_mem; - - recv_rl = (struct virtchnl2_rss_lut *)vport->vc_msg; + /* size didn't change, we can reuse existing lut buf */ if (rss_data->rss_lut_size == le16_to_cpu(recv_rl->lut_entries)) goto do_memcpy; rss_data->rss_lut_size = le16_to_cpu(recv_rl->lut_entries); kfree(rss_data->rss_lut); - lut_buf_size = rss_data->rss_lut_size * sizeof(u32); rss_data->rss_lut = kzalloc(lut_buf_size, GFP_KERNEL); if (!rss_data->rss_lut) { rss_data->rss_lut_size = 0; - err = -ENOMEM; - goto free_mem; + return -ENOMEM; } do_memcpy: - memcpy(rss_data->rss_lut, vport->vc_msg, rss_data->rss_lut_size); -free_mem: - mutex_unlock(&vport->vc_buf_lock); - kfree(rl); + memcpy(rss_data->rss_lut, recv_rl->lut, rss_data->rss_lut_size); - return err; + return 0; } /** @@ -2542,68 +2395,70 @@ free_mem: */ int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get) { - struct idpf_adapter *adapter = vport->adapter; - struct virtchnl2_rss_key *recv_rk; + struct virtchnl2_rss_key *recv_rk __free(kfree) = NULL; + struct virtchnl2_rss_key *rk __free(kfree) = NULL; + struct idpf_vc_xn_params xn_params = {}; struct idpf_rss_data *rss_data; - struct virtchnl2_rss_key *rk; - int i, buf_size, err; + ssize_t reply_sz; + int i, buf_size; + u16 key_size; - rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; + rss_data = + &vport->adapter->vport_config[vport->idx]->user_config.rss_data; buf_size = struct_size(rk, key_flex, rss_data->rss_key_size); rk = kzalloc(buf_size, GFP_KERNEL); if (!rk) return -ENOMEM; rk->vport_id = cpu_to_le32(vport->vport_id); - mutex_lock(&vport->vc_buf_lock); - + xn_params.send_buf.iov_base = rk; + xn_params.send_buf.iov_len = buf_size; + xn_params.timeout_ms = 
IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; if (get) { - err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_RSS_KEY, - buf_size, (u8 *)rk); - if (err) - goto error; - - err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_RSS_KEY, - IDPF_VC_GET_RSS_KEY_ERR); - if (err) - goto error; - - recv_rk = (struct virtchnl2_rss_key *)vport->vc_msg; - if (rss_data->rss_key_size != - le16_to_cpu(recv_rk->key_len)) { - rss_data->rss_key_size = - min_t(u16, NETDEV_RSS_KEY_LEN, - le16_to_cpu(recv_rk->key_len)); - kfree(rss_data->rss_key); - rss_data->rss_key = kzalloc(rss_data->rss_key_size, - GFP_KERNEL); - if (!rss_data->rss_key) { - rss_data->rss_key_size = 0; - err = -ENOMEM; - goto error; - } - } - memcpy(rss_data->rss_key, recv_rk->key_flex, - rss_data->rss_key_size); + recv_rk = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); + if (!recv_rk) + return -ENOMEM; + + xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_KEY; + xn_params.recv_buf.iov_base = recv_rk; + xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN; } else { rk->key_len = cpu_to_le16(rss_data->rss_key_size); for (i = 0; i < rss_data->rss_key_size; i++) rk->key_flex[i] = rss_data->rss_key[i]; - err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_RSS_KEY, - buf_size, (u8 *)rk); - if (err) - goto error; + xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_KEY; + } + + reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; + if (!get) + return 0; + if (reply_sz < sizeof(struct virtchnl2_rss_key)) + return -EIO; + + key_size = min_t(u16, NETDEV_RSS_KEY_LEN, + le16_to_cpu(recv_rk->key_len)); + if (reply_sz < key_size) + return -EIO; - err = idpf_wait_for_event(adapter, vport, IDPF_VC_SET_RSS_KEY, - IDPF_VC_SET_RSS_KEY_ERR); + /* key len didn't change, reuse existing buf */ + if (rss_data->rss_key_size == key_size) + goto do_memcpy; + + rss_data->rss_key_size = key_size; + kfree(rss_data->rss_key); + rss_data->rss_key = kzalloc(key_size, GFP_KERNEL); + if (!rss_data->rss_key) { + rss_data->rss_key_size = 0; + return -ENOMEM; } -error: - mutex_unlock(&vport->vc_buf_lock); - kfree(rk); +do_memcpy: + memcpy(rss_data->rss_key, recv_rk->key_flex, rss_data->rss_key_size); - return err; + return 0; } /** @@ -2655,13 +2510,15 @@ static void idpf_fill_ptype_lookup(struct idpf_rx_ptype_decoded *ptype, */ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) { + struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL; + struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL; struct idpf_rx_ptype_decoded *ptype_lkup = vport->rx_ptype_lkup; - struct virtchnl2_get_ptype_info get_ptype_info; int max_ptype, ptypes_recvd = 0, ptype_offset; struct idpf_adapter *adapter = vport->adapter; - struct virtchnl2_get_ptype_info *ptype_info; + struct idpf_vc_xn_params xn_params = {}; u16 next_ptype_id = 0; - int err = 0, i, j, k; + ssize_t reply_sz; + int i, j, k; if (idpf_is_queue_model_split(vport->rxq_model)) max_ptype = IDPF_RX_MAX_PTYPE; @@ -2670,43 +2527,44 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) memset(vport->rx_ptype_lkup, 0, sizeof(vport->rx_ptype_lkup)); + get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL); + if (!get_ptype_info) + return -ENOMEM; + ptype_info = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL); if (!ptype_info) return -ENOMEM; - mutex_lock(&adapter->vc_buf_lock); + xn_params.vc_op = VIRTCHNL2_OP_GET_PTYPE_INFO; + xn_params.send_buf.iov_base = get_ptype_info; + xn_params.send_buf.iov_len = sizeof(*get_ptype_info); + xn_params.recv_buf.iov_base = ptype_info; + xn_params.recv_buf.iov_len = 
IDPF_CTLQ_MAX_BUF_LEN; + xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; while (next_ptype_id < max_ptype) { - get_ptype_info.start_ptype_id = cpu_to_le16(next_ptype_id); + get_ptype_info->start_ptype_id = cpu_to_le16(next_ptype_id); if ((next_ptype_id + IDPF_RX_MAX_PTYPES_PER_BUF) > max_ptype) - get_ptype_info.num_ptypes = + get_ptype_info->num_ptypes = cpu_to_le16(max_ptype - next_ptype_id); else - get_ptype_info.num_ptypes = + get_ptype_info->num_ptypes = cpu_to_le16(IDPF_RX_MAX_PTYPES_PER_BUF); - err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_PTYPE_INFO, - sizeof(struct virtchnl2_get_ptype_info), - (u8 *)&get_ptype_info); - if (err) - goto vc_buf_unlock; - - err = idpf_wait_for_event(adapter, NULL, IDPF_VC_GET_PTYPE_INFO, - IDPF_VC_GET_PTYPE_INFO_ERR); - if (err) - goto vc_buf_unlock; + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; - memcpy(ptype_info, adapter->vc_msg, IDPF_CTLQ_MAX_BUF_LEN); + if (reply_sz < IDPF_CTLQ_MAX_BUF_LEN) + return -EIO; ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes); - if (ptypes_recvd > max_ptype) { - err = -EINVAL; - goto vc_buf_unlock; - } + if (ptypes_recvd > max_ptype) + return -EINVAL; - next_ptype_id = le16_to_cpu(get_ptype_info.start_ptype_id) + - le16_to_cpu(get_ptype_info.num_ptypes); + next_ptype_id = le16_to_cpu(get_ptype_info->start_ptype_id) + + le16_to_cpu(get_ptype_info->num_ptypes); ptype_offset = IDPF_RX_PTYPE_HDR_SZ; @@ -2719,17 +2577,13 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) ((u8 *)ptype_info + ptype_offset); ptype_offset += IDPF_GET_PTYPE_SIZE(ptype); - if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN) { - err = -EINVAL; - goto vc_buf_unlock; - } + if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN) + return -EINVAL; /* 0xFFFF indicates end of ptypes */ if (le16_to_cpu(ptype->ptype_id_10) == - IDPF_INVALID_PTYPE_ID) { - err = 0; - goto vc_buf_unlock; - } + IDPF_INVALID_PTYPE_ID) + return 0; if (idpf_is_queue_model_split(vport->rxq_model)) k = le16_to_cpu(ptype->ptype_id_10); @@ -2857,11 +2711,7 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) } } -vc_buf_unlock: - mutex_unlock(&adapter->vc_buf_lock); - kfree(ptype_info); - - return err; + return 0; } /** @@ -2873,27 +2723,20 @@ vc_buf_unlock: */ int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport) { + struct idpf_vc_xn_params xn_params = {}; struct virtchnl2_loopback loopback; - int err; + ssize_t reply_sz; loopback.vport_id = cpu_to_le32(vport->vport_id); loopback.enable = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK); - mutex_lock(&vport->vc_buf_lock); - - err = idpf_send_mb_msg(vport->adapter, VIRTCHNL2_OP_LOOPBACK, - sizeof(loopback), (u8 *)&loopback); - if (err) - goto rel_lock; + xn_params.vc_op = VIRTCHNL2_OP_LOOPBACK; + xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; + xn_params.send_buf.iov_base = &loopback; + xn_params.send_buf.iov_len = sizeof(loopback); + reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); - err = idpf_wait_for_event(vport->adapter, vport, - IDPF_VC_LOOPBACK_STATE, - IDPF_VC_LOOPBACK_STATE_ERR); - -rel_lock: - mutex_unlock(&vport->vc_buf_lock); - - return err; + return reply_sz < 0 ? 
reply_sz : 0; } /** @@ -2958,7 +2801,7 @@ int idpf_init_dflt_mbx(struct idpf_adapter *adapter) return -ENOENT; } - adapter->state = __IDPF_STARTUP; + adapter->state = __IDPF_VER_CHECK; return 0; } @@ -3055,35 +2898,42 @@ int idpf_vc_core_init(struct idpf_adapter *adapter) u16 num_max_vports; int err = 0; + if (!adapter->vcxn_mngr) { + adapter->vcxn_mngr = kzalloc(sizeof(*adapter->vcxn_mngr), GFP_KERNEL); + if (!adapter->vcxn_mngr) { + err = -ENOMEM; + goto init_failed; + } + } + idpf_vc_xn_init(adapter->vcxn_mngr); + while (adapter->state != __IDPF_INIT_SW) { switch (adapter->state) { - case __IDPF_STARTUP: - if (idpf_send_ver_msg(adapter)) - goto init_failed; - adapter->state = __IDPF_VER_CHECK; - goto restart; case __IDPF_VER_CHECK: - err = idpf_recv_ver_msg(adapter); - if (err == -EIO) { - return err; - } else if (err == -EAGAIN) { - adapter->state = __IDPF_STARTUP; + err = idpf_send_ver_msg(adapter); + switch (err) { + case 0: + /* success, move state machine forward */ + adapter->state = __IDPF_GET_CAPS; + fallthrough; + case -EAGAIN: goto restart; - } else if (err) { + default: + /* Something bad happened, try again but only a + * few times. + */ goto init_failed; } - if (idpf_send_get_caps_msg(adapter)) - goto init_failed; - adapter->state = __IDPF_GET_CAPS; - goto restart; case __IDPF_GET_CAPS: - if (idpf_recv_get_caps_msg(adapter)) + err = idpf_send_get_caps_msg(adapter); + if (err) goto init_failed; adapter->state = __IDPF_INIT_SW; break; default: dev_err(&adapter->pdev->dev, "Device is in bad state: %d\n", adapter->state); + err = -EINVAL; goto init_failed; } break; @@ -3142,7 +2992,9 @@ restart: queue_delayed_work(adapter->init_wq, &adapter->init_task, msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07))); - goto no_err; + set_bit(IDPF_VC_CORE_INIT, adapter->flags); + + return 0; err_intr_req: cancel_delayed_work_sync(&adapter->serv_task); @@ -3151,7 +3003,6 @@ err_intr_req: err_netdev_alloc: kfree(adapter->vports); adapter->vports = NULL; -no_err: return err; init_failed: @@ -3168,7 +3019,9 @@ init_failed: * register writes might not have taken effect. 
Retry to initialize * the mailbox again */ - adapter->state = __IDPF_STARTUP; + adapter->state = __IDPF_VER_CHECK; + if (adapter->vcxn_mngr) + idpf_vc_xn_shutdown(adapter->vcxn_mngr); idpf_deinit_dflt_mbx(adapter); set_bit(IDPF_HR_DRV_LOAD, adapter->flags); queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task, @@ -3184,29 +3037,22 @@ init_failed: */ void idpf_vc_core_deinit(struct idpf_adapter *adapter) { - int i; + if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags)) + return; + idpf_vc_xn_shutdown(adapter->vcxn_mngr); idpf_deinit_task(adapter); idpf_intr_rel(adapter); - /* Set all bits as we dont know on which vc_state the vhnl_wq is - * waiting on and wakeup the virtchnl workqueue even if it is waiting - * for the response as we are going down - */ - for (i = 0; i < IDPF_VC_NBITS; i++) - set_bit(i, adapter->vc_state); - wake_up(&adapter->vchnl_wq); cancel_delayed_work_sync(&adapter->serv_task); cancel_delayed_work_sync(&adapter->mbx_task); idpf_vport_params_buf_rel(adapter); - /* Clear all the bits */ - for (i = 0; i < IDPF_VC_NBITS; i++) - clear_bit(i, adapter->vc_state); - kfree(adapter->vports); adapter->vports = NULL; + + clear_bit(IDPF_VC_CORE_INIT, adapter->flags); } /** @@ -3622,6 +3468,75 @@ u32 idpf_get_vport_id(struct idpf_vport *vport) } /** + * idpf_mac_filter_async_handler - Async callback for mac filters + * @adapter: private data struct + * @xn: transaction for message + * @ctlq_msg: received message + * + * In some scenarios driver can't sleep and wait for a reply (e.g.: stack is + * holding rtnl_lock) when adding a new mac filter. It puts us in a difficult + * situation to deal with errors returned on the reply. The best we can + * ultimately do is remove it from our list of mac filters and report the + * error. + */ +static int idpf_mac_filter_async_handler(struct idpf_adapter *adapter, + struct idpf_vc_xn *xn, + const struct idpf_ctlq_msg *ctlq_msg) +{ + struct virtchnl2_mac_addr_list *ma_list; + struct idpf_vport_config *vport_config; + struct virtchnl2_mac_addr *mac_addr; + struct idpf_mac_filter *f, *tmp; + struct list_head *ma_list_head; + struct idpf_vport *vport; + u16 num_entries; + int i; + + /* if success we're done, we're only here if something bad happened */ + if (!ctlq_msg->cookie.mbx.chnl_retval) + return 0; + + /* make sure at least struct is there */ + if (xn->reply_sz < sizeof(*ma_list)) + goto invalid_payload; + + ma_list = ctlq_msg->ctx.indirect.payload->va; + mac_addr = ma_list->mac_addr_list; + num_entries = le16_to_cpu(ma_list->num_mac_addr); + /* we should have received a buffer at least this big */ + if (xn->reply_sz < struct_size(ma_list, mac_addr_list, num_entries)) + goto invalid_payload; + + vport = idpf_vid_to_vport(adapter, le32_to_cpu(ma_list->vport_id)); + if (!vport) + goto invalid_payload; + + vport_config = adapter->vport_config[le32_to_cpu(ma_list->vport_id)]; + ma_list_head = &vport_config->user_config.mac_filter_list; + + /* We can't do much to reconcile bad filters at this point, however we + * should at least remove them from our list one way or the other so we + * have some idea what good filters we have. 
+ */ + spin_lock_bh(&vport_config->mac_filter_list_lock); + list_for_each_entry_safe(f, tmp, ma_list_head, list) + for (i = 0; i < num_entries; i++) + if (ether_addr_equal(mac_addr[i].addr, f->macaddr)) + list_del(&f->list); + spin_unlock_bh(&vport_config->mac_filter_list_lock); + dev_err_ratelimited(&adapter->pdev->dev, "Received error sending MAC filter request (op %d)\n", + xn->vc_op); + + return 0; + +invalid_payload: + dev_err_ratelimited(&adapter->pdev->dev, "Received invalid MAC filter payload (op %d) (len %zd)\n", + xn->vc_op, xn->reply_sz); + + return -EINVAL; +} + +/** * idpf_add_del_mac_filters - Add/del mac filters * @vport: Virtual port data structure * @np: Netdev private structure @@ -3634,17 +3549,21 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport, struct idpf_netdev_priv *np, bool add, bool async) { - struct virtchnl2_mac_addr_list *ma_list = NULL; + struct virtchnl2_mac_addr_list *ma_list __free(kfree) = NULL; + struct virtchnl2_mac_addr *mac_addr __free(kfree) = NULL; struct idpf_adapter *adapter = np->adapter; + struct idpf_vc_xn_params xn_params = {}; struct idpf_vport_config *vport_config; - enum idpf_vport_config_flags mac_flag; - struct pci_dev *pdev = adapter->pdev; - enum idpf_vport_vc_state vc, vc_err; - struct virtchnl2_mac_addr *mac_addr; - struct idpf_mac_filter *f, *tmp; u32 num_msgs, total_filters = 0; - int i = 0, k, err = 0; - u32 vop; + struct idpf_mac_filter *f; + ssize_t reply_sz; + int i = 0, k; + + xn_params.vc_op = add ? VIRTCHNL2_OP_ADD_MAC_ADDR : + VIRTCHNL2_OP_DEL_MAC_ADDR; + xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; + xn_params.async = async; + xn_params.async_handler = idpf_mac_filter_async_handler; vport_config = adapter->vport_config[np->vport_idx]; spin_lock_bh(&vport_config->mac_filter_list_lock); @@ -3668,13 +3587,13 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport, mac_addr = kcalloc(total_filters, sizeof(struct virtchnl2_mac_addr), GFP_ATOMIC); if (!mac_addr) { - err = -ENOMEM; spin_unlock_bh(&vport_config->mac_filter_list_lock); - goto error; + + return -ENOMEM; } - list_for_each_entry_safe(f, tmp, &vport_config->user_config.mac_filter_list, - list) { + list_for_each_entry(f, &vport_config->user_config.mac_filter_list, + list) { if (add && f->add) { ether_addr_copy(mac_addr[i].addr, f->macaddr); i++; @@ -3693,26 +3612,11 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport, spin_unlock_bh(&vport_config->mac_filter_list_lock); - if (add) { - vop = VIRTCHNL2_OP_ADD_MAC_ADDR; - vc = IDPF_VC_ADD_MAC_ADDR; - vc_err = IDPF_VC_ADD_MAC_ADDR_ERR; - mac_flag = IDPF_VPORT_ADD_MAC_REQ; - } else { - vop = VIRTCHNL2_OP_DEL_MAC_ADDR; - vc = IDPF_VC_DEL_MAC_ADDR; - vc_err = IDPF_VC_DEL_MAC_ADDR_ERR; - mac_flag = IDPF_VPORT_DEL_MAC_REQ; - } - /* Chunk up the filters into multiple messages to avoid * sending a control queue message buffer that is too large */ num_msgs = DIV_ROUND_UP(total_filters, IDPF_NUM_FILTERS_PER_MSG); - if (!async) - mutex_lock(&vport->vc_buf_lock); - for (i = 0, k = 0; i < num_msgs; i++) { u32 entries_size, buf_size, num_entries; @@ -3724,10 +3628,8 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport, if (!ma_list || num_entries != IDPF_NUM_FILTERS_PER_MSG) { kfree(ma_list); ma_list = kzalloc(buf_size, GFP_ATOMIC); - if (!ma_list) { - err = -ENOMEM; - goto list_prep_error; - } + if (!ma_list) + return -ENOMEM; } else { memset(ma_list, 0, buf_size); } @@ -3736,34 +3638,17 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport, ma_list->num_mac_addr = cpu_to_le16(num_entries); 
memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size); - if (async) - set_bit(mac_flag, vport_config->flags); - - err = idpf_send_mb_msg(adapter, vop, buf_size, (u8 *)ma_list); - if (err) - goto mbx_error; - - if (!async) { - err = idpf_wait_for_event(adapter, vport, vc, vc_err); - if (err) - goto mbx_error; - } + xn_params.send_buf.iov_base = ma_list; + xn_params.send_buf.iov_len = buf_size; + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); + if (reply_sz < 0) + return reply_sz; k += num_entries; total_filters -= num_entries; } -mbx_error: - if (!async) - mutex_unlock(&vport->vc_buf_lock); - kfree(ma_list); -list_prep_error: - kfree(mac_addr); -error: - if (err) - dev_err(&pdev->dev, "Failed to add or del mac filters %d", err); - - return err; + return 0; } /** @@ -3780,9 +3665,10 @@ int idpf_set_promiscuous(struct idpf_adapter *adapter, struct idpf_vport_user_config_data *config_data, u32 vport_id) { + struct idpf_vc_xn_params xn_params = {}; struct virtchnl2_promisc_info vpi; + ssize_t reply_sz; u16 flags = 0; - int err; if (test_bit(__IDPF_PROMISC_UC, config_data->user_flags)) flags |= VIRTCHNL2_UNICAST_PROMISC; @@ -3792,9 +3678,13 @@ int idpf_set_promiscuous(struct idpf_adapter *adapter, vpi.vport_id = cpu_to_le32(vport_id); vpi.flags = cpu_to_le16(flags); - err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE, - sizeof(struct virtchnl2_promisc_info), - (u8 *)&vpi); + xn_params.vc_op = VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE; + xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC; + xn_params.send_buf.iov_base = &vpi; + xn_params.send_buf.iov_len = sizeof(vpi); + /* setting promiscuous is only ever done asynchronously */ + xn_params.async = true; + reply_sz = idpf_vc_xn_exec(adapter, &xn_params); - return err; + return reply_sz < 0 ? 
reply_sz : 0; } diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h new file mode 100644 index 000000000000..83da5d8da56b --- /dev/null +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2024 Intel Corporation */ + +#ifndef _IDPF_VIRTCHNL_H_ +#define _IDPF_VIRTCHNL_H_ + +struct idpf_adapter; +struct idpf_netdev_priv; +struct idpf_vec_regs; +struct idpf_vport; +struct idpf_vport_max_q; +struct idpf_vport_user_config_data; + +int idpf_init_dflt_mbx(struct idpf_adapter *adapter); +void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter); +int idpf_vc_core_init(struct idpf_adapter *adapter); +void idpf_vc_core_deinit(struct idpf_adapter *adapter); + +int idpf_get_reg_intr_vecs(struct idpf_vport *vport, + struct idpf_vec_regs *reg_vals); +int idpf_queue_reg_init(struct idpf_vport *vport); +int idpf_vport_queue_ids_init(struct idpf_vport *vport); + +int idpf_recv_mb_msg(struct idpf_adapter *adapter); +int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op, + u16 msg_size, u8 *msg, u16 cookie); + +void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q); +u32 idpf_get_vport_id(struct idpf_vport *vport); +int idpf_send_create_vport_msg(struct idpf_adapter *adapter, + struct idpf_vport_max_q *max_q); +int idpf_send_destroy_vport_msg(struct idpf_vport *vport); +int idpf_send_enable_vport_msg(struct idpf_vport *vport); +int idpf_send_disable_vport_msg(struct idpf_vport *vport); + +int idpf_vport_adjust_qs(struct idpf_vport *vport); +int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter, + struct idpf_vport_max_q *max_q); +void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter, + struct idpf_vport_max_q *max_q); +int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q, + u16 num_complq, u16 num_rx_q, u16 num_rx_bufq); +int idpf_send_delete_queues_msg(struct idpf_vport *vport); +int idpf_send_enable_queues_msg(struct idpf_vport *vport); +int idpf_send_disable_queues_msg(struct idpf_vport *vport); +int idpf_send_config_queues_msg(struct idpf_vport *vport); + +int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport); +int idpf_get_vec_ids(struct idpf_adapter *adapter, + u16 *vecids, int num_vecids, + struct virtchnl2_vector_chunks *chunks); +int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors); +int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter); +int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map); + +int idpf_add_del_mac_filters(struct idpf_vport *vport, + struct idpf_netdev_priv *np, + bool add, bool async); +int idpf_set_promiscuous(struct idpf_adapter *adapter, + struct idpf_vport_user_config_data *config_data, + u32 vport_id); +int idpf_check_supported_desc_ids(struct idpf_vport *vport); +int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport); +int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport); +int idpf_send_get_stats_msg(struct idpf_vport *vport); +int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs); +int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get); +int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get); + +#endif /* _IDPF_VIRTCHNL_H_ */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index cebb44f51d5f..518298bbdadc 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ 
-202,7 +202,7 @@ static struct notifier_block dca_notifier = { #endif #ifdef CONFIG_PCI_IOV static unsigned int max_vfs; -module_param(max_vfs, uint, 0); +module_param(max_vfs, uint, 0444); MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function"); #endif /* CONFIG_PCI_IOV */ @@ -2538,7 +2538,7 @@ igb_features_check(struct sk_buff *skb, struct net_device *dev, unsigned int network_hdr_len, mac_hdr_len; /* Make certain the headers can be described by a context descriptor */ - mac_hdr_len = skb_network_header(skb) - skb->data; + mac_hdr_len = skb_network_offset(skb); if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN)) return features & ~(NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC | diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 319c544b9f04..f94570556120 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -957,7 +957,7 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter) igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval); /* adjust timestamp for the TX latency based on link speed */ - if (adapter->hw.mac.type == e1000_i210) { + if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) { switch (adapter->link_speed) { case SPEED_10: adjust = IGB_I210_TX_LATENCY_10; @@ -1003,6 +1003,7 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, ktime_t *timestamp) { struct igb_adapter *adapter = q_vector->adapter; + struct e1000_hw *hw = &adapter->hw; struct skb_shared_hwtstamps ts; __le64 *regval = (__le64 *)va; int adjust = 0; @@ -1022,7 +1023,7 @@ int igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, void *va, igb_ptp_systim_to_hwtstamp(adapter, &ts, le64_to_cpu(regval[1])); /* adjust timestamp for the RX latency based on link speed */ - if (adapter->hw.mac.type == e1000_i210) { + if (hw->mac.type == e1000_i210 || hw->mac.type == e1000_i211) { switch (adapter->link_speed) { case SPEED_10: adjust = IGB_I210_RX_LATENCY_10; diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index a4d4f00e6a87..b0cf310e6f7b 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -2655,7 +2655,7 @@ igbvf_features_check(struct sk_buff *skb, struct net_device *dev, unsigned int network_hdr_len, mac_hdr_len; /* Make certain the headers can be described by a context descriptor */ - mac_hdr_len = skb_network_header(skb) - skb->data; + mac_hdr_len = skb_network_offset(skb); if (unlikely(mac_hdr_len > IGBVF_MAX_MAC_HDR_LEN)) return features & ~(NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC | diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index cfa6baccec55..90316dc58630 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -570,7 +570,6 @@ struct igc_q_vector { struct rcu_head rcu; /* to avoid race with update stats on free */ char name[IFNAMSIZ + 9]; - struct net_device poll_dev; /* for dynamic allocation of rings associated with this q_vector */ struct igc_ring ring[] ____cacheline_internodealigned_in_smp; diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 3af52d238f3b..14924dd8a4b9 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -5277,7 +5277,7 @@ igc_features_check(struct sk_buff *skb, struct net_device *dev, unsigned int network_hdr_len, mac_hdr_len; /* Make certain the 
headers can be described by a context descriptor */ - mac_hdr_len = skb_network_header(skb) - skb->data; + mac_hdr_len = skb_network_offset(skb); if (unlikely(mac_hdr_len > IGC_MAX_MAC_HDR_LEN)) return features & ~(NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC | @@ -6488,7 +6488,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames, int cpu = smp_processor_id(); struct netdev_queue *nq; struct igc_ring *ring; - int i, drops; + int i, nxmit; if (unlikely(!netif_carrier_ok(dev))) return -ENETDOWN; @@ -6504,16 +6504,15 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames, /* Avoid transmit queue timeout since we share it with the slow path */ txq_trans_cond_update(nq); - drops = 0; + nxmit = 0; for (i = 0; i < num_frames; i++) { int err; struct xdp_frame *xdpf = frames[i]; err = igc_xdp_init_tx_descriptor(ring, xdpf); - if (err) { - xdp_return_frame_rx_napi(xdpf); - drops++; - } + if (err) + break; + nxmit++; } if (flags & XDP_XMIT_FLUSH) @@ -6521,7 +6520,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames, __netif_tx_unlock(nq); - return num_frames - drops; + return nxmit; } static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index e0c300fe5cee..cdaf087b4e85 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -334,7 +334,9 @@ static int ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1) { *speed = IXGBE_LINK_SPEED_1GB_FULL; *autoneg = true; return 0; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 633bac1543dd..6e6e6f1847b6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -349,6 +349,8 @@ static int ixgbe_get_link_ksettings(struct net_device *netdev, case ixgbe_sfp_type_1g_sx_core1: case ixgbe_sfp_type_1g_lx_core0: case ixgbe_sfp_type_1g_lx_core1: + case ixgbe_sfp_type_1g_bx_core0: + case ixgbe_sfp_type_1g_bx_core1: ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE); ethtool_link_ksettings_add_link_mode(cmd, advertising, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index e23c3614fb10..8fe8107eb854 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -153,7 +153,7 @@ MODULE_PARM_DESC(max_vfs, #endif /* CONFIG_PCI_IOV */ static bool allow_unsupported_sfp; -module_param(allow_unsupported_sfp, bool, 0); +module_param(allow_unsupported_sfp, bool, 0444); MODULE_PARM_DESC(allow_unsupported_sfp, "Allow unsupported and untested SFP+ modules on 82599-based adapters"); @@ -2939,8 +2939,8 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, u64 qmask) { - u32 mask; struct ixgbe_hw *hw = &adapter->hw; + u32 mask; switch (hw->mac.type) { case ixgbe_mac_82598EB: @@ -10205,7 +10205,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, unsigned int network_hdr_len, 
mac_hdr_len; /* Make certain the headers can be described by a context descriptor */ - mac_hdr_len = skb_network_header(skb) - skb->data; + mac_hdr_len = skb_network_offset(skb); if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN)) return features & ~(NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC | @@ -10525,6 +10525,44 @@ static void ixgbe_reset_rxr_stats(struct ixgbe_ring *rx_ring) } /** + * ixgbe_irq_disable_single - Disable single IRQ vector + * @adapter: adapter structure + * @ring: ring index + **/ +static void ixgbe_irq_disable_single(struct ixgbe_adapter *adapter, u32 ring) +{ + struct ixgbe_hw *hw = &adapter->hw; + u64 qmask = BIT_ULL(ring); + u32 mask; + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82598EB: + mask = qmask & IXGBE_EIMC_RTX_QUEUE; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + mask = (qmask & 0xFFFFFFFF); + if (mask) + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); + mask = (qmask >> 32); + if (mask) + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask); + break; + default: + break; + } + IXGBE_WRITE_FLUSH(&adapter->hw); + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) + synchronize_irq(adapter->msix_entries[ring].vector); + else + synchronize_irq(adapter->pdev->irq); +} + +/** * ixgbe_txrx_ring_disable - Disable Rx/Tx/XDP Tx rings * @adapter: adapter structure * @ring: ring index @@ -10540,6 +10578,11 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring) tx_ring = adapter->tx_ring[ring]; xdp_ring = adapter->xdp_ring[ring]; + ixgbe_irq_disable_single(adapter, ring); + + /* Rx/Tx/XDP Tx share the same napi context. */ + napi_disable(&rx_ring->q_vector->napi); + ixgbe_disable_txr(adapter, tx_ring); if (xdp_ring) ixgbe_disable_txr(adapter, xdp_ring); @@ -10548,9 +10591,6 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring) if (xdp_ring) synchronize_rcu(); - /* Rx/Tx/XDP Tx share the same napi context. */ - napi_disable(&rx_ring->q_vector->napi); - ixgbe_clean_tx_ring(tx_ring); if (xdp_ring) ixgbe_clean_tx_ring(xdp_ring); @@ -10578,9 +10618,6 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring) tx_ring = adapter->tx_ring[ring]; xdp_ring = adapter->xdp_ring[ring]; - /* Rx/Tx/XDP Tx share the same napi context. */ - napi_enable(&rx_ring->q_vector->napi); - ixgbe_configure_tx_ring(adapter, tx_ring); if (xdp_ring) ixgbe_configure_tx_ring(adapter, xdp_ring); @@ -10589,6 +10626,11 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring) clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state); if (xdp_ring) clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state); + + /* Rx/Tx/XDP Tx share the same napi context. 
*/ + napi_enable(&rx_ring->q_vector->napi); + ixgbe_irq_enable_queues(adapter, BIT_ULL(ring)); + IXGBE_WRITE_FLUSH(&adapter->hw); } /** diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 75e9453331ed..07eaa3c3f4d3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -1532,6 +1532,7 @@ int ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; struct ixgbe_adapter *adapter = hw->back; u8 oui_bytes[3] = {0, 0, 0}; + u8 bitrate_nominal = 0; u8 comp_codes_10g = 0; u8 comp_codes_1g = 0; u16 enforce_sfp = 0; @@ -1576,7 +1577,12 @@ int ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_CABLE_TECHNOLOGY, &cable_tech); + if (status) + goto err_read_i2c_eeprom; + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_BITRATE_NOMINAL, + &bitrate_nominal); if (status) goto err_read_i2c_eeprom; @@ -1659,6 +1665,18 @@ int ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) else hw->phy.sfp_type = ixgbe_sfp_type_1g_lx_core1; + /* Support only Ethernet 1000BASE-BX10, checking the Bit Rate + * Nominal Value as per SFF-8472 by convention 1.25 Gb/s should + * be rounded up to 0Dh (13 in units of 100 MBd) for 1000BASE-BX + */ + } else if ((comp_codes_1g & IXGBE_SFF_BASEBX10_CAPABLE) && + (bitrate_nominal == 0xD)) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = + ixgbe_sfp_type_1g_bx_core0; + else + hw->phy.sfp_type = + ixgbe_sfp_type_1g_bx_core1; } else { hw->phy.sfp_type = ixgbe_sfp_type_unknown; } @@ -1747,7 +1765,9 @@ int ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1)) { hw->phy.type = ixgbe_phy_sfp_unsupported; return -EOPNOTSUPP; } @@ -1763,7 +1783,9 @@ int ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 || hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { + hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_bx_core1)) { /* Make sure we're a supported PHY type */ if (hw->phy.type == ixgbe_phy_sfp_intel) return 0; @@ -1999,12 +2021,14 @@ int ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 || sfp_type == ixgbe_sfp_type_1g_lx_core0 || sfp_type == ixgbe_sfp_type_1g_cu_core0 || - sfp_type == ixgbe_sfp_type_1g_sx_core0) + sfp_type == ixgbe_sfp_type_1g_sx_core0 || + sfp_type == ixgbe_sfp_type_1g_bx_core0) sfp_type = ixgbe_sfp_type_srlr_core0; else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 || sfp_type == ixgbe_sfp_type_1g_lx_core1 || sfp_type == ixgbe_sfp_type_1g_cu_core1 || - sfp_type == ixgbe_sfp_type_1g_sx_core1) + sfp_type == ixgbe_sfp_type_1g_sx_core1 || + sfp_type == ixgbe_sfp_type_1g_bx_core1) sfp_type = ixgbe_sfp_type_srlr_core1; /* Read offset to PHY init contents */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index beedcb7bec0d..14aa2ca51f70 100644 
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -17,6 +17,7 @@ #define IXGBE_SFF_1GBE_COMP_CODES 0x6 #define IXGBE_SFF_10GBE_COMP_CODES 0x3 #define IXGBE_SFF_CABLE_TECHNOLOGY 0x8 +#define IXGBE_SFF_BITRATE_NOMINAL 0xC #define IXGBE_SFF_CABLE_SPEC_COMP 0x3C #define IXGBE_SFF_SFF_8472_SWAP 0x5C #define IXGBE_SFF_SFF_8472_COMP 0x5E @@ -39,6 +40,7 @@ #define IXGBE_SFF_1GBASESX_CAPABLE 0x1 #define IXGBE_SFF_1GBASELX_CAPABLE 0x2 #define IXGBE_SFF_1GBASET_CAPABLE 0x8 +#define IXGBE_SFF_BASEBX10_CAPABLE 0x64 #define IXGBE_SFF_10GBASESR_CAPABLE 0x10 #define IXGBE_SFF_10GBASELR_CAPABLE 0x20 #define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index d44c58130be2..ed440dd0c4f9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -3210,6 +3210,9 @@ enum ixgbe_sfp_type { ixgbe_sfp_type_1g_sx_core1 = 12, ixgbe_sfp_type_1g_lx_core0 = 13, ixgbe_sfp_type_1g_lx_core1 = 14, + ixgbe_sfp_type_1g_bx_core0 = 15, + ixgbe_sfp_type_1g_bx_core1 = 16, + ixgbe_sfp_type_not_present = 0xFFFE, ixgbe_sfp_type_unknown = 0xFFFF }; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index a44e4bd56142..9c960017a6de 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -4413,7 +4413,7 @@ ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev, unsigned int network_hdr_len, mac_hdr_len; /* Make certain the headers can be described by a context descriptor */ - mac_hdr_len = skb_network_header(skb) - skb->data; + mac_hdr_len = skb_network_offset(skb); if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN)) return features & ~(NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC | |