Diffstat (limited to 'drivers/net/ethernet/intel/ice')
-rw-r--r-- drivers/net/ethernet/intel/ice/Makefile | 4
-rw-r--r-- drivers/net/ethernet/intel/ice/ice.h | 54
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 411
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_base.c | 9
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.c | 987
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_common.h | 63
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_dcb_nl.c | 50
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ddp.c | 646
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ddp.h | 37
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_devids.h | 10
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_dpll.c | 2117
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_dpll.h | 113
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_eswitch.c | 50
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_eswitch_br.c | 1346
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_eswitch_br.h | 120
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ethtool.c | 232
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ethtool.h | 8
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c | 24
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_flow.c | 5
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_flow.h | 3
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_fw_update.c | 45
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_gnss.c | 3
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 56
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lag.c | 1917
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lag.h | 38
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.c | 188
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_lib.h | 7
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_main.c | 293
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_protocol_type.h | 9
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp.c | 721
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp.h | 46
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp_hw.c | 1180
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_ptp_hw.h | 103
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_repr.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_repr.h | 3
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_sched.c | 95
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_sched.h | 23
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_sriov.c | 314
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_sriov.h | 17
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_switch.c | 356
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_switch.h | 37
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_tc_lib.c | 192
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_trace.h | 90
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.c | 3
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_txrx_lib.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_type.h | 41
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_lib.c | 487
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_lib.h | 9
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_lib_private.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c | 191
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h | 4
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl.c | 123
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c | 29
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c | 84
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h | 8
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h | 1
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_xsk.c | 243
58 files changed, 11069 insertions, 2183 deletions
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 817977e3039d..0679907980f7 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -43,9 +43,9 @@ ice-$(CONFIG_PCI_IOV) += \
ice_vf_mbx.o \
ice_vf_vsi_vlan_ops.o \
ice_vf_lib.o
-ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o
+ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o ice_dpll.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
-ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o
+ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o ice_eswitch_br.o
ice-$(CONFIG_GNSS) += ice_gnss.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 4ba3d99439a0..351e0d36df44 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -76,6 +76,7 @@
#include "ice_vsi_vlan_ops.h"
#include "ice_gnss.h"
#include "ice_irq.h"
+#include "ice_dpll.h"
#define ICE_BAR0 0
#define ICE_REQ_DESC_MULTIPLE 32
@@ -195,11 +196,16 @@
#define ice_pf_to_dev(pf) (&((pf)->pdev->dev))
+#define ice_pf_src_tmr_owned(pf) ((pf)->hw.func_caps.ts_func_info.src_tmr_owned)
+
enum ice_feature {
ICE_F_DSCP,
- ICE_F_PTP_EXTTS,
+ ICE_F_PHY_RCLK,
ICE_F_SMA_CTRL,
+ ICE_F_CGU,
ICE_F_GNSS,
+ ICE_F_ROCE_LAG,
+ ICE_F_SRIOV_LAG,
ICE_F_MAX
};
@@ -370,6 +376,7 @@ struct ice_vsi {
u16 rx_buf_len;
struct ice_aqc_vsi_props info; /* VSI properties */
+ struct ice_vsi_vlan_info vlan_info; /* vlan config to be restored */
/* VSI stats */
struct rtnl_link_stats64 net_stats;
@@ -505,6 +512,7 @@ enum ice_pf_flags {
ICE_FLAG_UNPLUG_AUX_DEV,
ICE_FLAG_MTU_CHANGED,
ICE_FLAG_GNSS, /* GNSS successfully initialized */
+ ICE_FLAG_DPLL, /* SyncE/PTP dplls initialized */
ICE_PF_FLAGS_NBITS /* must be last */
};
@@ -517,6 +525,7 @@ enum ice_misc_thread_tasks {
struct ice_switchdev_info {
struct ice_vsi *control_vsi;
struct ice_vsi *uplink_vsi;
+ struct ice_esw_br_offloads *br_offloads;
bool is_running;
};
@@ -545,6 +554,8 @@ struct ice_pf {
* MSIX vectors allowed on this PF.
*/
u16 sriov_base_vector;
+ unsigned long *sriov_irq_bm; /* bitmap to track irq usage */
+ u16 sriov_irq_size; /* size of the irq_bm bitmap */
u16 ctrl_vsi_idx; /* control VSI index in pf->vsi array */
@@ -567,6 +578,7 @@ struct ice_pf {
struct mutex sw_mutex; /* lock for protecting VSI alloc flow */
struct mutex tc_mutex; /* lock to protect TC changes */
struct mutex adev_mutex; /* lock to protect aux device access */
+ struct mutex lag_mutex; /* protect ice_lag struct in PF */
u32 msg_enable;
struct ice_ptp ptp;
struct gnss_serial *gnss_serial;
@@ -626,6 +638,7 @@ struct ice_pf {
struct ice_lag *lag; /* Link Aggregation information */
struct ice_switchdev_info switchdev;
+ struct ice_esw_br_port *br_port;
#define ICE_INVALID_AGG_NODE_ID 0
#define ICE_PF_AGG_NODE_ID_START 1
@@ -634,8 +647,11 @@ struct ice_pf {
#define ICE_VF_AGG_NODE_ID_START 65
#define ICE_MAX_VF_AGG_NODES 32
struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
+ struct ice_dplls dplls;
};
+extern struct workqueue_struct *ice_lag_wq;
+
struct ice_netdev_priv {
struct ice_vsi *vsi;
struct ice_repr *repr;
@@ -660,6 +676,18 @@ static inline bool ice_vector_ch_enabled(struct ice_q_vector *qv)
}
/**
+ * ice_ptp_pf_handles_tx_interrupt - Check if PF handles Tx interrupt
+ * @pf: Board private structure
+ *
+ * Return true if this PF should respond to the Tx timestamp interrupt
+ * indication in the miscellaneous OICR interrupt handler.
+ */
+static inline bool ice_ptp_pf_handles_tx_interrupt(struct ice_pf *pf)
+{
+ return pf->ptp.tx_interrupt_mode != ICE_PTP_TX_INTERRUPT_NONE;
+}
+
+/**
* ice_irq_dynamic_ena - Enable default interrupt generation settings
* @hw: pointer to HW struct
* @vsi: pointer to VSI struct, can be NULL
@@ -853,7 +881,7 @@ static inline bool ice_is_adq_active(struct ice_pf *pf)
return false;
}
-bool netif_is_ice(struct net_device *dev);
+bool netif_is_ice(const struct net_device *dev);
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
int ice_vsi_open_ctrl(struct ice_vsi *vsi);
@@ -909,14 +937,32 @@ void ice_fdir_release_flows(struct ice_hw *hw);
void ice_fdir_replay_flows(struct ice_hw *hw);
void ice_fdir_replay_fltrs(struct ice_pf *pf);
int ice_fdir_create_dflt_rules(struct ice_pf *pf);
-int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
- struct ice_rq_event_info *event);
+
+enum ice_aq_task_state {
+ ICE_AQ_TASK_NOT_PREPARED,
+ ICE_AQ_TASK_WAITING,
+ ICE_AQ_TASK_COMPLETE,
+ ICE_AQ_TASK_CANCELED,
+};
+
+struct ice_aq_task {
+ struct hlist_node entry;
+ struct ice_rq_event_info event;
+ enum ice_aq_task_state state;
+ u16 opcode;
+};
+
+void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
+ u16 opcode);
+int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
+ unsigned long timeout);
int ice_open(struct net_device *netdev);
int ice_open_internal(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
void ice_service_task_schedule(struct ice_pf *pf);
int ice_load(struct ice_pf *pf);
void ice_unload(struct ice_pf *pf);
+void ice_adv_lnk_speed_maps_init(void);
/**
* ice_set_rdma_cap - enable RDMA support
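The one-shot ice_aq_wait_for_event() above is split into a prepare/wait pair
so a caller can register for the event before issuing the admin queue command
that triggers it, closing the race with fast firmware responses. A minimal
sketch of the intended calling pattern (the opcode, the trigger helper and the
timeout here are illustrative, not taken from this patch):

	struct ice_aq_task task;
	int err;

	/* Register interest first so a fast completion cannot be missed. */
	ice_aq_prep_for_event(pf, &task, ice_aqc_opc_nvm_write_activate);

	err = issue_triggering_aq_cmd(pf);	/* hypothetical trigger */
	if (err)
		return err;

	/* Sleep until the event arrives or the timeout expires;
	 * task.event then holds the received descriptor data.
	 */
	err = ice_aq_wait_for_event(pf, &task, 30 * HZ);
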
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 63d3e1dcbba5..d7fdb7ba7268 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -120,6 +120,9 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076
#define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
#define ICE_AQC_CAPS_NVM_MGMT 0x0080
+#define ICE_AQC_CAPS_FW_LAG_SUPPORT 0x0092
+#define ICE_AQC_BIT_ROCEV2_LAG 0x01
+#define ICE_AQC_BIT_SRIOV_LAG 0x02
u8 major_ver;
u8 minor_ver;
@@ -232,6 +235,8 @@ struct ice_aqc_set_port_params {
#define ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA BIT(2)
__le16 bad_frame_vsi;
__le16 swid;
+#define ICE_AQC_PORT_SWID_VALID BIT(15)
+#define ICE_AQC_PORT_SWID_M 0xFF
u8 reserved[10];
};
@@ -241,10 +246,12 @@ struct ice_aqc_set_port_params {
* Allocate Resources command (indirect 0x0208)
* Free Resources command (indirect 0x0209)
* Get Allocated Resource Descriptors Command (indirect 0x020A)
+ * Share Resource command (indirect 0x020B)
*/
#define ICE_AQC_RES_TYPE_VSI_LIST_REP 0x03
#define ICE_AQC_RES_TYPE_VSI_LIST_PRUNE 0x04
#define ICE_AQC_RES_TYPE_RECIPE 0x05
+#define ICE_AQC_RES_TYPE_SWID 0x07
#define ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK 0x21
#define ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES 0x22
#define ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES 0x23
@@ -264,6 +271,7 @@ struct ice_aqc_set_port_params {
/* Allocate Resources command (indirect 0x0208)
* Free Resources command (indirect 0x0209)
+ * Share Resource command (indirect 0x020B)
*/
struct ice_aqc_alloc_free_res_cmd {
__le16 num_entries; /* Number of Resource entries */
@@ -818,7 +826,11 @@ struct ice_aqc_txsched_move_grp_info_hdr {
__le32 src_parent_teid;
__le32 dest_parent_teid;
__le16 num_elems;
- __le16 reserved;
+ u8 mode;
+#define ICE_AQC_MOVE_ELEM_MODE_SAME_PF 0x0
+#define ICE_AQC_MOVE_ELEM_MODE_GIVE_OWN 0x1
+#define ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN 0x2
+ u8 reserved;
};
struct ice_aqc_move_elem {
@@ -1087,7 +1099,15 @@ struct ice_aqc_get_phy_caps {
#define ICE_PHY_TYPE_HIGH_100G_CAUI2 BIT_ULL(2)
#define ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC BIT_ULL(3)
#define ICE_PHY_TYPE_HIGH_100G_AUI2 BIT_ULL(4)
-#define ICE_PHY_TYPE_HIGH_MAX_INDEX 4
+#define ICE_PHY_TYPE_HIGH_200G_CR4_PAM4 BIT_ULL(5)
+#define ICE_PHY_TYPE_HIGH_200G_SR4 BIT_ULL(6)
+#define ICE_PHY_TYPE_HIGH_200G_FR4 BIT_ULL(7)
+#define ICE_PHY_TYPE_HIGH_200G_LR4 BIT_ULL(8)
+#define ICE_PHY_TYPE_HIGH_200G_DR4 BIT_ULL(9)
+#define ICE_PHY_TYPE_HIGH_200G_KR4_PAM4 BIT_ULL(10)
+#define ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC BIT_ULL(11)
+#define ICE_PHY_TYPE_HIGH_200G_AUI4 BIT_ULL(12)
+#define ICE_PHY_TYPE_HIGH_MAX_INDEX 12
struct ice_aqc_get_phy_caps_data {
__le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
@@ -1307,11 +1327,41 @@ struct ice_aqc_get_link_status_data {
#define ICE_AQ_LINK_SPEED_40GB BIT(8)
#define ICE_AQ_LINK_SPEED_50GB BIT(9)
#define ICE_AQ_LINK_SPEED_100GB BIT(10)
+#define ICE_AQ_LINK_SPEED_200GB BIT(11)
#define ICE_AQ_LINK_SPEED_UNKNOWN BIT(15)
- __le32 reserved3; /* Aligns next field to 8-byte boundary */
- __le64 phy_type_low; /* Use values from ICE_PHY_TYPE_LOW_* */
- __le64 phy_type_high; /* Use values from ICE_PHY_TYPE_HIGH_* */
-};
+ /* Aligns next field to 8-byte boundary */
+ __le16 reserved3;
+ u8 ext_fec_status;
+ /* RS 272 FEC enabled */
+#define ICE_AQ_LINK_RS_272_FEC_EN BIT(0)
+ u8 reserved4;
+ /* Use values from ICE_PHY_TYPE_LOW_* */
+ __le64 phy_type_low;
+ /* Use values from ICE_PHY_TYPE_HIGH_* */
+ __le64 phy_type_high;
+#define ICE_AQC_LS_DATA_SIZE_V1 \
+ offsetofend(struct ice_aqc_get_link_status_data, phy_type_high)
+ /* Get link status v2 link partner data */
+ __le64 lp_phy_type_low;
+ __le64 lp_phy_type_high;
+ u8 lp_fec_adv;
+#define ICE_AQ_LINK_LP_10G_KR_FEC_CAP BIT(0)
+#define ICE_AQ_LINK_LP_25G_KR_FEC_CAP BIT(1)
+#define ICE_AQ_LINK_LP_RS_528_FEC_CAP BIT(2)
+#define ICE_AQ_LINK_LP_50G_KR_272_FEC_CAP BIT(3)
+#define ICE_AQ_LINK_LP_100G_KR_272_FEC_CAP BIT(4)
+#define ICE_AQ_LINK_LP_200G_KR_272_FEC_CAP BIT(5)
+ u8 lp_fec_req;
+#define ICE_AQ_LINK_LP_10G_KR_FEC_REQ BIT(0)
+#define ICE_AQ_LINK_LP_25G_KR_FEC_REQ BIT(1)
+#define ICE_AQ_LINK_LP_RS_528_FEC_REQ BIT(2)
+#define ICE_AQ_LINK_LP_KR_272_FEC_REQ BIT(3)
+ u8 lp_flowcontrol;
+#define ICE_AQ_LINK_LP_PAUSE_ADV BIT(0)
+#define ICE_AQ_LINK_LP_ASM_DIR_ADV BIT(1)
+#define ICE_AQC_LS_DATA_SIZE_V2 \
+ offsetofend(struct ice_aqc_get_link_status_data, lp_flowcontrol)
+} __packed;
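The two ICE_AQC_LS_DATA_SIZE_* macros above derive the response length from
the struct layout itself: offsetofend(TYPE, MEMBER) evaluates to
offsetof(TYPE, MEMBER) + sizeof(MEMBER), so the v1 length ends right after
phy_type_high and the v2 length right after lp_flowcontrol, with no trailing
padding counted. A sketch of how a caller might pick between them
(supports_v2 is a hypothetical flag, not code from this patch):

	/* Request the v2-sized buffer only from firmware that fills the
	 * link-partner fields; otherwise fall back to the v1 length.
	 */
	u16 datalen = supports_v2 ? ICE_AQC_LS_DATA_SIZE_V2
				  : ICE_AQC_LS_DATA_SIZE_V1;
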
/* Set event mask command (direct 0x0613) */
struct ice_aqc_set_event_mask {
@@ -1339,6 +1389,30 @@ struct ice_aqc_set_mac_lb {
u8 reserved[15];
};
+/* Set PHY recovered clock output (direct 0x0630) */
+struct ice_aqc_set_phy_rec_clk_out {
+ u8 phy_output;
+ u8 port_num;
+#define ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT 0xFF
+ u8 flags;
+#define ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN BIT(0)
+ u8 rsvd;
+ __le32 freq;
+ u8 rsvd2[6];
+ __le16 node_handle;
+};
+
+/* Get PHY recovered clock output (direct 0x0631) */
+struct ice_aqc_get_phy_rec_clk_out {
+ u8 phy_output;
+ u8 port_num;
+#define ICE_AQC_GET_PHY_REC_CLK_OUT_CURR_PORT 0xFF
+ u8 flags;
+#define ICE_AQC_GET_PHY_REC_CLK_OUT_OUT_EN BIT(0)
+ u8 rsvd[11];
+ __le16 node_handle;
+};
+
struct ice_aqc_link_topo_params {
u8 lport_num;
u8 lport_num_valid;
@@ -1355,6 +1429,9 @@ struct ice_aqc_link_topo_params {
#define ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE 6
#define ICE_AQC_LINK_TOPO_NODE_TYPE_MEZZ 7
#define ICE_AQC_LINK_TOPO_NODE_TYPE_ID_EEPROM 8
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL 9
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX 10
+#define ICE_AQC_LINK_TOPO_NODE_TYPE_GPS 11
#define ICE_AQC_LINK_TOPO_NODE_CTX_S 4
#define ICE_AQC_LINK_TOPO_NODE_CTX_M \
(0xF << ICE_AQC_LINK_TOPO_NODE_CTX_S)
@@ -1391,7 +1468,13 @@ struct ice_aqc_link_topo_addr {
struct ice_aqc_get_link_topo {
struct ice_aqc_link_topo_addr addr;
u8 node_part_num;
-#define ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575 0x21
+#define ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575 0x21
+#define ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032 0x24
+#define ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384 0x25
+#define ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY 0x30
+#define ICE_AQC_GET_LINK_TOPO_NODE_NR_C827 0x31
+#define ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX 0x47
+#define ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS 0x48
u8 rsvd[9];
};
@@ -1781,11 +1864,10 @@ struct ice_aqc_lldp_filter_ctrl {
u8 reserved2[12];
};
+#define ICE_AQC_RSS_VSI_VALID BIT(15)
+
/* Get/Set RSS key (indirect 0x0B04/0x0B02) */
struct ice_aqc_get_set_rss_key {
-#define ICE_AQC_GSET_RSS_KEY_VSI_VALID BIT(15)
-#define ICE_AQC_GSET_RSS_KEY_VSI_ID_S 0
-#define ICE_AQC_GSET_RSS_KEY_VSI_ID_M (0x3FF << ICE_AQC_GSET_RSS_KEY_VSI_ID_S)
__le16 vsi_id;
u8 reserved[6];
__le32 addr_high;
@@ -1803,35 +1885,33 @@ struct ice_aqc_get_set_rss_keys {
u8 extended_hash_key[ICE_AQC_GET_SET_RSS_KEY_DATA_HASH_KEY_SIZE];
};
-/* Get/Set RSS LUT (indirect 0x0B05/0x0B03) */
-struct ice_aqc_get_set_rss_lut {
-#define ICE_AQC_GSET_RSS_LUT_VSI_VALID BIT(15)
-#define ICE_AQC_GSET_RSS_LUT_VSI_ID_S 0
-#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x3FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S)
- __le16 vsi_id;
-#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S 0
-#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M \
- (0x3 << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S)
-
-#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI 0
-#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF 1
-#define ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL 2
+enum ice_lut_type {
+ ICE_LUT_VSI = 0,
+ ICE_LUT_PF = 1,
+ ICE_LUT_GLOBAL = 2,
+};
-#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S 2
-#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M \
- (0x3 << ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S)
+enum ice_lut_size {
+ ICE_LUT_VSI_SIZE = 64,
+ ICE_LUT_GLOBAL_SIZE = 512,
+ ICE_LUT_PF_SIZE = 2048,
+};
-#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128 128
-#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG 0
-#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512 512
-#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG 1
-#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K 2048
-#define ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG 2
+/* enum ice_aqc_lut_flags combines the constants used to fill
+ * &ice_aqc_get_set_rss_lut ::flags, which is an amalgamation of the global
+ * LUT ID, the LUT size and the LUT type, the last of which needs neither
+ * shift nor mask.
+ */
+enum ice_aqc_lut_flags {
+ ICE_AQC_LUT_SIZE_SMALL = 0, /* size = 64 or 128 */
+ ICE_AQC_LUT_SIZE_512 = BIT(2),
+ ICE_AQC_LUT_SIZE_2K = BIT(3),
-#define ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S 4
-#define ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M \
- (0xF << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S)
+ ICE_AQC_LUT_GLOBAL_IDX = GENMASK(7, 4),
+};
+/* Get/Set RSS LUT (indirect 0x0B05/0x0B03) */
+struct ice_aqc_get_set_rss_lut {
+ __le16 vsi_id;
__le16 flags;
__le32 reserved;
__le32 addr_high;
@@ -1923,6 +2003,42 @@ struct ice_aqc_dis_txq_item {
__le16 q_id[];
} __packed;
+/* Move/Reconfigure Tx queue (indirect 0x0C32) */
+struct ice_aqc_cfg_txqs {
+ u8 cmd_type;
+#define ICE_AQC_Q_CFG_MOVE_NODE 0x1
+#define ICE_AQC_Q_CFG_TC_CHNG 0x2
+#define ICE_AQC_Q_CFG_MOVE_TC_CHNG 0x3
+#define ICE_AQC_Q_CFG_SUBSEQ_CALL BIT(2)
+#define ICE_AQC_Q_CFG_FLUSH BIT(3)
+ u8 num_qs;
+ u8 port_num_chng;
+#define ICE_AQC_Q_CFG_SRC_PRT_M 0x7
+#define ICE_AQC_Q_CFG_DST_PRT_S 3
+#define ICE_AQC_Q_CFG_DST_PRT_M (0x7 << ICE_AQC_Q_CFG_DST_PRT_S)
+ u8 time_out;
+#define ICE_AQC_Q_CFG_TIMEOUT_S 2
+#define ICE_AQC_Q_CFG_TIMEOUT_M (0x1F << ICE_AQC_Q_CFG_TIMEOUT_S)
+ __le32 blocked_cgds;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Per Q struct for Move/Reconfigure Tx LAN Queues (indirect 0x0C32) */
+struct ice_aqc_cfg_txq_perq {
+ __le16 q_handle;
+ u8 tc;
+ u8 rsvd;
+ __le32 q_teid;
+};
+
+/* The buffer for Move/Reconfigure Tx LAN Queues (indirect 0x0C32) */
+struct ice_aqc_cfg_txqs_buf {
+ __le32 src_parent_teid;
+ __le32 dst_parent_teid;
+ struct ice_aqc_cfg_txq_perq queue_info[];
+};
+
/* Add Tx RDMA Queue Set (indirect 0x0C33) */
struct ice_aqc_add_rdma_qset {
u8 num_qset_grps;
@@ -2079,6 +2195,193 @@ struct ice_aqc_get_pkg_info_resp {
struct ice_aqc_get_pkg_info pkg_info[];
};
+/* Get CGU abilities command response data structure (indirect 0x0C61) */
+struct ice_aqc_get_cgu_abilities {
+ u8 num_inputs;
+ u8 num_outputs;
+ u8 pps_dpll_idx;
+ u8 eec_dpll_idx;
+ __le32 max_in_freq;
+ __le32 max_in_phase_adj;
+ __le32 max_out_freq;
+ __le32 max_out_phase_adj;
+ u8 cgu_part_num;
+ u8 rsvd[3];
+};
+
+/* Set CGU input config (direct 0x0C62) */
+struct ice_aqc_set_cgu_input_config {
+ u8 input_idx;
+ u8 flags1;
+#define ICE_AQC_SET_CGU_IN_CFG_FLG1_UPDATE_FREQ BIT(6)
+#define ICE_AQC_SET_CGU_IN_CFG_FLG1_UPDATE_DELAY BIT(7)
+ u8 flags2;
+#define ICE_AQC_SET_CGU_IN_CFG_FLG2_INPUT_EN BIT(5)
+#define ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN BIT(6)
+ u8 rsvd;
+ __le32 freq;
+ __le32 phase_delay;
+ u8 rsvd2[2];
+ __le16 node_handle;
+};
+
+/* Get CGU input config response descriptor structure (direct 0x0C63) */
+struct ice_aqc_get_cgu_input_config {
+ u8 input_idx;
+ u8 status;
+#define ICE_AQC_GET_CGU_IN_CFG_STATUS_LOS BIT(0)
+#define ICE_AQC_GET_CGU_IN_CFG_STATUS_SCM_FAIL BIT(1)
+#define ICE_AQC_GET_CGU_IN_CFG_STATUS_CFM_FAIL BIT(2)
+#define ICE_AQC_GET_CGU_IN_CFG_STATUS_GST_FAIL BIT(3)
+#define ICE_AQC_GET_CGU_IN_CFG_STATUS_PFM_FAIL BIT(4)
+#define ICE_AQC_GET_CGU_IN_CFG_STATUS_ESYNC_FAIL BIT(6)
+#define ICE_AQC_GET_CGU_IN_CFG_STATUS_ESYNC_CAP BIT(7)
+ u8 type;
+#define ICE_AQC_GET_CGU_IN_CFG_TYPE_READ_ONLY BIT(0)
+#define ICE_AQC_GET_CGU_IN_CFG_TYPE_GPS BIT(4)
+#define ICE_AQC_GET_CGU_IN_CFG_TYPE_EXTERNAL BIT(5)
+#define ICE_AQC_GET_CGU_IN_CFG_TYPE_PHY BIT(6)
+ u8 flags1;
+#define ICE_AQC_GET_CGU_IN_CFG_FLG1_PHASE_DELAY_SUPP BIT(0)
+#define ICE_AQC_GET_CGU_IN_CFG_FLG1_1PPS_SUPP BIT(2)
+#define ICE_AQC_GET_CGU_IN_CFG_FLG1_10MHZ_SUPP BIT(3)
+#define ICE_AQC_GET_CGU_IN_CFG_FLG1_ANYFREQ BIT(7)
+ __le32 freq;
+ __le32 phase_delay;
+ u8 flags2;
+#define ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN BIT(5)
+#define ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN BIT(6)
+ u8 rsvd[1];
+ __le16 node_handle;
+};
+
+/* Set CGU output config (direct 0x0C64) */
+struct ice_aqc_set_cgu_output_config {
+ u8 output_idx;
+ u8 flags;
+#define ICE_AQC_SET_CGU_OUT_CFG_OUT_EN BIT(0)
+#define ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN BIT(1)
+#define ICE_AQC_SET_CGU_OUT_CFG_UPDATE_FREQ BIT(2)
+#define ICE_AQC_SET_CGU_OUT_CFG_UPDATE_PHASE BIT(3)
+#define ICE_AQC_SET_CGU_OUT_CFG_UPDATE_SRC_SEL BIT(4)
+ u8 src_sel;
+#define ICE_AQC_SET_CGU_OUT_CFG_DPLL_SRC_SEL ICE_M(0x1F, 0)
+ u8 rsvd;
+ __le32 freq;
+ __le32 phase_delay;
+ u8 rsvd2[2];
+ __le16 node_handle;
+};
+
+/* Get CGU output config (direct 0x0C65) */
+struct ice_aqc_get_cgu_output_config {
+ u8 output_idx;
+ u8 flags;
+#define ICE_AQC_GET_CGU_OUT_CFG_OUT_EN BIT(0)
+#define ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN BIT(1)
+#define ICE_AQC_GET_CGU_OUT_CFG_ESYNC_ABILITY BIT(2)
+ u8 src_sel;
+#define ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL_SHIFT 0
+#define ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL \
+ ICE_M(0x1F, ICE_AQC_GET_CGU_OUT_CFG_DPLL_SRC_SEL_SHIFT)
+#define ICE_AQC_GET_CGU_OUT_CFG_DPLL_MODE_SHIFT 5
+#define ICE_AQC_GET_CGU_OUT_CFG_DPLL_MODE \
+ ICE_M(0x7, ICE_AQC_GET_CGU_OUT_CFG_DPLL_MODE_SHIFT)
+ u8 rsvd;
+ __le32 freq;
+ __le32 src_freq;
+ u8 rsvd2[2];
+ __le16 node_handle;
+};
+
+/* Get CGU DPLL status (direct 0x0C66) */
+struct ice_aqc_get_cgu_dpll_status {
+ u8 dpll_num;
+ u8 ref_state;
+#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_LOS BIT(0)
+#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_SCM BIT(1)
+#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_CFM BIT(2)
+#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_GST BIT(3)
+#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_PFM BIT(4)
+#define ICE_AQC_GET_CGU_DPLL_STATUS_FAST_LOCK_EN BIT(5)
+#define ICE_AQC_GET_CGU_DPLL_STATUS_REF_SW_ESYNC BIT(6)
+ u8 dpll_state;
+#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_LOCK BIT(0)
+#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_HO BIT(1)
+#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_HO_READY BIT(2)
+#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_FLHIT BIT(5)
+#define ICE_AQC_GET_CGU_DPLL_STATUS_STATE_PSLHIT BIT(7)
+ u8 config;
+#define ICE_AQC_GET_CGU_DPLL_CONFIG_CLK_REF_SEL ICE_M(0x1F, 0)
+#define ICE_AQC_GET_CGU_DPLL_CONFIG_MODE_SHIFT 5
+#define ICE_AQC_GET_CGU_DPLL_CONFIG_MODE \
+ ICE_M(0x7, ICE_AQC_GET_CGU_DPLL_CONFIG_MODE_SHIFT)
+#define ICE_AQC_GET_CGU_DPLL_CONFIG_MODE_FREERUN 0
+#define ICE_AQC_GET_CGU_DPLL_CONFIG_MODE_AUTOMATIC \
+ ICE_M(0x3, ICE_AQC_GET_CGU_DPLL_CONFIG_MODE_SHIFT)
+ __le32 phase_offset_h;
+ __le32 phase_offset_l;
+ u8 eec_mode;
+#define ICE_AQC_GET_CGU_DPLL_STATUS_EEC_MODE_1 0xA
+#define ICE_AQC_GET_CGU_DPLL_STATUS_EEC_MODE_2 0xB
+#define ICE_AQC_GET_CGU_DPLL_STATUS_EEC_MODE_UNKNOWN 0xF
+ u8 rsvd[1];
+ __le16 node_handle;
+};
+
+/* Set CGU DPLL config (direct 0x0C67) */
+struct ice_aqc_set_cgu_dpll_config {
+ u8 dpll_num;
+ u8 ref_state;
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_LOS BIT(0)
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_SCM BIT(1)
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_CFM BIT(2)
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_GST BIT(3)
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_PFM BIT(4)
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_FLOCK_EN BIT(5)
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_REF_SW_ESYNC BIT(6)
+ u8 rsvd;
+ u8 config;
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_CLK_REF_SEL ICE_M(0x1F, 0)
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_MODE_SHIFT 5
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_MODE \
+ ICE_M(0x7, ICE_AQC_SET_CGU_DPLL_CONFIG_MODE_SHIFT)
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_MODE_FREERUN 0
+#define ICE_AQC_SET_CGU_DPLL_CONFIG_MODE_AUTOMATIC \
+ ICE_M(0x3, ICE_AQC_SET_CGU_DPLL_CONFIG_MODE_SHIFT)
+ u8 rsvd2[8];
+ u8 eec_mode;
+ u8 rsvd3[1];
+ __le16 node_handle;
+};
+
+/* Set CGU reference priority (direct 0x0C68) */
+struct ice_aqc_set_cgu_ref_prio {
+ u8 dpll_num;
+ u8 ref_idx;
+ u8 ref_priority;
+ u8 rsvd[11];
+ __le16 node_handle;
+};
+
+/* Get CGU reference priority (direct 0x0C69) */
+struct ice_aqc_get_cgu_ref_prio {
+ u8 dpll_num;
+ u8 ref_idx;
+ u8 ref_priority; /* Valid only in response */
+ u8 rsvd[13];
+};
+
+/* Get CGU info (direct 0x0C6A) */
+struct ice_aqc_get_cgu_info {
+ __le32 cgu_id;
+ __le32 cgu_cfg_ver;
+ __le32 cgu_fw_ver;
+ u8 node_part_num;
+ u8 dev_rev;
+ __le16 node_handle;
+};
+
/* Driver Shared Parameters (direct, 0x0C90) */
struct ice_aqc_driver_shared_params {
u8 set_or_get_op;
@@ -2093,16 +2396,6 @@ struct ice_aqc_driver_shared_params {
__le32 addr_low;
};
-enum ice_aqc_driver_params {
- /* OS clock index for PTP timer Domain 0 */
- ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0 = 0,
- /* OS clock index for PTP timer Domain 1 */
- ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1,
-
- /* Add new parameters above */
- ICE_AQC_DRIVER_PARAM_MAX = 16,
-};
-
/* Lan Queue Overflow Event (direct, 0x1001) */
struct ice_aqc_event_lan_overflow {
__le32 prtdcb_ruptq;
@@ -2148,6 +2441,8 @@ struct ice_aq_desc {
struct ice_aqc_get_phy_caps get_phy;
struct ice_aqc_set_phy_cfg set_phy;
struct ice_aqc_restart_an restart_an;
+ struct ice_aqc_set_phy_rec_clk_out set_phy_rec_clk_out;
+ struct ice_aqc_get_phy_rec_clk_out get_phy_rec_clk_out;
struct ice_aqc_gpio read_write_gpio;
struct ice_aqc_sff_eeprom read_write_sff_param;
struct ice_aqc_set_port_id_led set_port_id_led;
@@ -2181,12 +2476,22 @@ struct ice_aq_desc {
struct ice_aqc_neigh_dev_req neigh_dev;
struct ice_aqc_add_txqs add_txqs;
struct ice_aqc_dis_txqs dis_txqs;
+ struct ice_aqc_cfg_txqs cfg_txqs;
struct ice_aqc_add_rdma_qset add_rdma_qset;
struct ice_aqc_add_get_update_free_vsi vsi_cmd;
struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
struct ice_aqc_fw_logging fw_logging;
struct ice_aqc_get_clear_fw_log get_clear_fw_log;
struct ice_aqc_download_pkg download_pkg;
+ struct ice_aqc_set_cgu_input_config set_cgu_input_config;
+ struct ice_aqc_get_cgu_input_config get_cgu_input_config;
+ struct ice_aqc_set_cgu_output_config set_cgu_output_config;
+ struct ice_aqc_get_cgu_output_config get_cgu_output_config;
+ struct ice_aqc_get_cgu_dpll_status get_cgu_dpll_status;
+ struct ice_aqc_set_cgu_dpll_config set_cgu_dpll_config;
+ struct ice_aqc_set_cgu_ref_prio set_cgu_ref_prio;
+ struct ice_aqc_get_cgu_ref_prio get_cgu_ref_prio;
+ struct ice_aqc_get_cgu_info get_cgu_info;
struct ice_aqc_driver_shared_params drv_shared_params;
struct ice_aqc_set_mac_lb set_mac_lb;
struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
@@ -2263,6 +2568,7 @@ enum ice_adminq_opc {
/* Alloc/Free/Get Resources */
ice_aqc_opc_alloc_res = 0x0208,
ice_aqc_opc_free_res = 0x0209,
+ ice_aqc_opc_share_res = 0x020B,
ice_aqc_opc_set_vlan_mode_parameters = 0x020C,
ice_aqc_opc_get_vlan_mode_parameters = 0x020D,
@@ -2310,6 +2616,8 @@ enum ice_adminq_opc {
ice_aqc_opc_get_link_status = 0x0607,
ice_aqc_opc_set_event_mask = 0x0613,
ice_aqc_opc_set_mac_lb = 0x0620,
+ ice_aqc_opc_set_phy_rec_clk_out = 0x0630,
+ ice_aqc_opc_get_phy_rec_clk_out = 0x0631,
ice_aqc_opc_get_link_topo = 0x06E0,
ice_aqc_opc_read_i2c = 0x06E2,
ice_aqc_opc_write_i2c = 0x06E3,
@@ -2356,6 +2664,7 @@ enum ice_adminq_opc {
/* Tx queue handling commands/events */
ice_aqc_opc_add_txqs = 0x0C30,
ice_aqc_opc_dis_txqs = 0x0C31,
+ ice_aqc_opc_cfg_txqs = 0x0C32,
ice_aqc_opc_add_rdma_qset = 0x0C33,
/* package commands */
@@ -2364,6 +2673,18 @@ enum ice_adminq_opc {
ice_aqc_opc_update_pkg = 0x0C42,
ice_aqc_opc_get_pkg_info_list = 0x0C43,
+ /* 1588/SyncE commands/events */
+ ice_aqc_opc_get_cgu_abilities = 0x0C61,
+ ice_aqc_opc_set_cgu_input_config = 0x0C62,
+ ice_aqc_opc_get_cgu_input_config = 0x0C63,
+ ice_aqc_opc_set_cgu_output_config = 0x0C64,
+ ice_aqc_opc_get_cgu_output_config = 0x0C65,
+ ice_aqc_opc_get_cgu_dpll_status = 0x0C66,
+ ice_aqc_opc_set_cgu_dpll_config = 0x0C67,
+ ice_aqc_opc_set_cgu_ref_prio = 0x0C68,
+ ice_aqc_opc_get_cgu_ref_prio = 0x0C69,
+ ice_aqc_opc_get_cgu_info = 0x0C6A,
+
ice_aqc_opc_driver_shared_params = 0x0C90,
/* Standalone Commands/Events */
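With the old ICE_AQC_GSET_RSS_LUT_* shift/mask pairs gone, the flags word of
an RSS LUT descriptor is built by OR-ing three ingredients: the LUT type
(bits 1:0, no shift needed), a pre-shifted size flag (bits 3:2) and, for
global LUTs, the LUT index placed into bits 7:4 with FIELD_PREP(). A sketch
of the composition, mirroring what __ice_aq_get_set_rss_lut() in
ice_common.c does further below (the index value is illustrative):

	u16 flags = ICE_LUT_GLOBAL |
		    ICE_AQC_LUT_SIZE_512 |
		    FIELD_PREP(ICE_AQC_LUT_GLOBAL_IDX, 3);

	desc_params->flags = cpu_to_le16(flags);
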
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 074bf9403cd1..7fa43827a3f0 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -408,7 +408,6 @@ static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
*/
static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
{
- int chain_len = ICE_MAX_CHAINED_RX_BUFS;
struct ice_vsi *vsi = ring->vsi;
u32 rxdid = ICE_RXDID_FLEX_NIC;
struct ice_rlan_ctx rlan_ctx;
@@ -473,17 +472,11 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
*/
rlan_ctx.showiv = 0;
- /* For AF_XDP ZC, we disallow packets to span on
- * multiple buffers, thus letting us skip that
- * handling in the fast-path.
- */
- if (ring->xsk_pool)
- chain_len = 1;
/* Max packet size for this queue - must not be set to a larger value
* than 5 x DBUF
*/
rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
- chain_len * ring->rx_buf_len);
+ ICE_MAX_CHAINED_RX_BUFS * ring->rx_buf_len);
/* Rx queue threshold in units of 64 */
rlan_ctx.lrxqthresh = 1;
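Dropping the AF_XDP special case means rxmax is now computed the same way for
every queue; with ICE_MAX_CHAINED_RX_BUFS at 5 (the "5 x DBUF" limit in the
comment above), the clamp works out as, for example (values illustrative):

	/* max_frame = 3000, rx_buf_len = 2048:
	 * rxmax = min(3000, 5 * 2048) = 3000
	 */
	rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
			       ICE_MAX_CHAINED_RX_BUFS * ring->rx_buf_len);
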
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index e16d4c83ed5f..9a6c25f98632 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2018, Intel Corporation. */
+/* Copyright (c) 2018-2023, Intel Corporation. */
#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"
#include "ice_flow.h"
+#include "ice_ptp_hw.h"
#define ICE_PF_RESET_WAIT_COUNT 300
+#define ICE_MAX_NETLIST_SIZE 10
static const char * const ice_link_mode_str_low[] = {
[0] = "100BASE_TX",
@@ -152,6 +154,12 @@ static int ice_set_mac_type(struct ice_hw *hw)
case ICE_DEV_ID_E823L_SFP:
hw->mac_type = ICE_MAC_GENERIC;
break;
+ case ICE_DEV_ID_E830_BACKPLANE:
+ case ICE_DEV_ID_E830_QSFP56:
+ case ICE_DEV_ID_E830_SFP:
+ case ICE_DEV_ID_E830_SFP_DD:
+ hw->mac_type = ICE_MAC_E830;
+ break;
default:
hw->mac_type = ICE_MAC_UNKNOWN;
break;
@@ -435,6 +443,80 @@ ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
}
/**
+ * ice_aq_get_netlist_node
+ * @hw: pointer to the hw struct
+ * @cmd: get_link_topo AQ structure
+ * @node_part_number: output node part number if node found
+ * @node_handle: output node handle parameter if node found
+ *
+ * Get netlist node handle.
+ */
+int
+ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+ desc.params.get_link_topo = *cmd;
+
+ if (ice_aq_send_cmd(hw, &desc, NULL, 0, NULL))
+ return -EINTR;
+
+ if (node_handle)
+ *node_handle =
+ le16_to_cpu(desc.params.get_link_topo.addr.handle);
+ if (node_part_number)
+ *node_part_number = desc.params.get_link_topo.node_part_num;
+
+ return 0;
+}
+
+/**
+ * ice_find_netlist_node
+ * @hw: pointer to the hw struct
+ * @node_type_ctx: type of netlist node to look for
+ * @node_part_number: node part number to look for
+ * @node_handle: output parameter if node found - optional
+ *
+ * Scan the netlist for a node handle of the given node type and part number.
+ *
+ * If node_handle is non-NULL it will be modified on function exit. It is only
+ * valid if the function returns zero, and should be ignored on any non-zero
+ * return value.
+ *
+ * Returns: 0 if the node is found, -ENOENT if no handle was found, and
+ * a negative error code on failure to access the AQ.
+ */
+static int ice_find_netlist_node(struct ice_hw *hw, u8 node_type_ctx,
+ u8 node_part_number, u16 *node_handle)
+{
+ u8 idx;
+
+ for (idx = 0; idx < ICE_MAX_NETLIST_SIZE; idx++) {
+ struct ice_aqc_get_link_topo cmd = {};
+ u8 rec_node_part_number;
+ int status;
+
+ cmd.addr.topo_params.node_type_ctx =
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M,
+ node_type_ctx);
+ cmd.addr.topo_params.index = idx;
+
+ status = ice_aq_get_netlist_node(hw, &cmd,
+ &rec_node_part_number,
+ node_handle);
+ if (status)
+ return status;
+
+ if (rec_node_part_number == node_part_number)
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+/**
* ice_is_media_cage_present
* @pi: port information structure
*
@@ -570,6 +652,24 @@ static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
}
/**
+ * ice_get_link_status_datalen
+ * @hw: pointer to the HW struct
+ *
+ * Return the data length for the Get Link Status AQ command, which is
+ * larger for the newer adapter families handled by the ice driver.
+ */
+static u16 ice_get_link_status_datalen(struct ice_hw *hw)
+{
+ switch (hw->mac_type) {
+ case ICE_MAC_E830:
+ return ICE_AQC_LS_DATA_SIZE_V2;
+ case ICE_MAC_E810:
+ default:
+ return ICE_AQC_LS_DATA_SIZE_V1;
+ }
+}
+
+/**
* ice_aq_get_link_info
* @pi: port information structure
* @ena_lse: enable/disable LinkStatusEvent reporting
@@ -607,8 +707,8 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
resp->cmd_flags = cpu_to_le16(cmd_flags);
resp->lport_num = pi->lport;
- status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
-
+ status = ice_aq_send_cmd(hw, &desc, &link_data,
+ ice_get_link_status_datalen(hw), cd);
if (status)
return status;
@@ -683,8 +783,7 @@ static void
ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
struct ice_aqc_set_mac_cfg *cmd)
{
- u16 fc_thres_val, tx_timer_val;
- u32 val;
+ u32 val, fc_thres_m;
/* We read back the transmit timer and FC threshold value of
* LFC. Thus, we will use index =
@@ -693,19 +792,32 @@ ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
* Also, because we are operating on transmit timer and FC
* threshold of LFC, we don't turn on any bit in tx_tmr_priority
*/
-#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
-
- /* Retrieve the transmit timer */
- val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
- tx_timer_val = val &
- PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
- cmd->tx_tmr_value = cpu_to_le16(tx_timer_val);
-
- /* Retrieve the FC threshold */
- val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
- fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
-
- cmd->fc_refresh_threshold = cpu_to_le16(fc_thres_val);
+#define E800_IDX_OF_LFC E800_PRTMAC_HSEC_CTL_TX_PS_QNT_MAX
+#define E800_REFRESH_TMR E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR
+
+ if (hw->mac_type == ICE_MAC_E830) {
+ /* Retrieve the transmit timer */
+ val = rd32(hw, E830_PRTMAC_CL01_PS_QNT);
+ cmd->tx_tmr_value =
+ le16_encode_bits(val, E830_PRTMAC_CL01_PS_QNT_CL0_M);
+
+ /* Retrieve the fc threshold */
+ val = rd32(hw, E830_PRTMAC_CL01_QNT_THR);
+ fc_thres_m = E830_PRTMAC_CL01_QNT_THR_CL0_M;
+ } else {
+ /* Retrieve the transmit timer */
+ val = rd32(hw,
+ E800_PRTMAC_HSEC_CTL_TX_PS_QNT(E800_IDX_OF_LFC));
+ cmd->tx_tmr_value =
+ le16_encode_bits(val,
+ E800_PRTMAC_HSEC_CTL_TX_PS_QNT_M);
+
+ /* Retrieve the fc threshold */
+ val = rd32(hw,
+ E800_REFRESH_TMR(E800_IDX_OF_LFC));
+ fc_thres_m = E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR_M;
+ }
+ cmd->fc_refresh_threshold = le16_encode_bits(val, fc_thres_m);
}
/**
@@ -1999,37 +2111,31 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
/**
* ice_aq_alloc_free_res - command to allocate/free resources
* @hw: pointer to the HW struct
- * @num_entries: number of resource entries in buffer
* @buf: Indirect buffer to hold data parameters and response
* @buf_size: size of buffer for indirect commands
* @opc: pass in the command opcode
- * @cd: pointer to command details structure or NULL
*
* Helper function to allocate/free resources using the admin queue commands
*/
-int
-ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
- struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
- enum ice_adminq_opc opc, struct ice_sq_cd *cd)
+int ice_aq_alloc_free_res(struct ice_hw *hw,
+ struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
+ enum ice_adminq_opc opc)
{
struct ice_aqc_alloc_free_res_cmd *cmd;
struct ice_aq_desc desc;
cmd = &desc.params.sw_res_ctrl;
- if (!buf)
- return -EINVAL;
-
- if (buf_size < flex_array_size(buf, elem, num_entries))
+ if (!buf || buf_size < flex_array_size(buf, elem, 1))
return -EINVAL;
ice_fill_dflt_direct_cmd_desc(&desc, opc);
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
- cmd->num_entries = cpu_to_le16(num_entries);
+ cmd->num_entries = cpu_to_le16(1);
- return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ return ice_aq_send_cmd(hw, &desc, buf, buf_size, NULL);
}
/**
@@ -2059,8 +2165,7 @@ ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
if (btm)
buf->res_type |= cpu_to_le16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
- status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
- ice_aqc_opc_alloc_res, NULL);
+ status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res);
if (status)
goto ice_alloc_res_exit;
@@ -2094,8 +2199,7 @@ int ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
buf->res_type = cpu_to_le16(type);
memcpy(buf->elem, res, sizeof(*buf->elem) * num);
- status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
- ice_aqc_opc_free_res, NULL);
+ status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res);
if (status)
ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
@@ -2241,6 +2345,14 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
"%s: reset_restrict_support = %d\n", prefix,
caps->reset_restrict_support);
break;
+ case ICE_AQC_CAPS_FW_LAG_SUPPORT:
+ caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG);
+ ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n",
+ prefix, caps->roce_lag);
+ caps->sriov_lag = !!(number & ICE_AQC_BIT_SRIOV_LAG);
+ ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n",
+ prefix, caps->sriov_lag);
+ break;
default:
/* Not one of the recognized common capabilities */
found = false;
@@ -2388,16 +2500,21 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
static void
ice_parse_fdir_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p)
{
- u32 reg_val, val;
+ u32 reg_val, gsize, bsize;
reg_val = rd32(hw, GLQF_FD_SIZE);
- val = (reg_val & GLQF_FD_SIZE_FD_GSIZE_M) >>
- GLQF_FD_SIZE_FD_GSIZE_S;
- func_p->fd_fltr_guar =
- ice_get_num_per_func(hw, val);
- val = (reg_val & GLQF_FD_SIZE_FD_BSIZE_M) >>
- GLQF_FD_SIZE_FD_BSIZE_S;
- func_p->fd_fltr_best_effort = val;
+ switch (hw->mac_type) {
+ case ICE_MAC_E830:
+ gsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_GSIZE_M, reg_val);
+ bsize = FIELD_GET(E830_GLQF_FD_SIZE_FD_BSIZE_M, reg_val);
+ break;
+ case ICE_MAC_E810:
+ default:
+ gsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_GSIZE_M, reg_val);
+ bsize = FIELD_GET(E800_GLQF_FD_SIZE_FD_BSIZE_M, reg_val);
+ }
+ func_p->fd_fltr_guar = ice_get_num_per_func(hw, gsize);
+ func_p->fd_fltr_best_effort = bsize;
ice_debug(hw, ICE_DBG_INIT, "func caps: fd_fltr_guar = %d\n",
func_p->fd_fltr_guar);
@@ -2654,6 +2771,116 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
}
/**
+ * ice_is_pf_c827 - check if pf contains c827 phy
+ * @hw: pointer to the hw struct
+ */
+bool ice_is_pf_c827(struct ice_hw *hw)
+{
+ struct ice_aqc_get_link_topo cmd = {};
+ u8 node_part_number;
+ u16 node_handle;
+ int status;
+
+ if (hw->mac_type != ICE_MAC_E810)
+ return false;
+
+ if (hw->device_id != ICE_DEV_ID_E810C_QSFP)
+ return true;
+
+ cmd.addr.topo_params.node_type_ctx =
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_TYPE_M, ICE_AQC_LINK_TOPO_NODE_TYPE_PHY) |
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M, ICE_AQC_LINK_TOPO_NODE_CTX_PORT);
+ cmd.addr.topo_params.index = 0;
+
+ status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
+ &node_handle);
+
+ if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
+ return false;
+
+ if (node_handle == E810C_QSFP_C827_0_HANDLE || node_handle == E810C_QSFP_C827_1_HANDLE)
+ return true;
+
+ return false;
+}
+
+/**
+ * ice_is_phy_rclk_in_netlist
+ * @hw: pointer to the hw struct
+ *
+ * Check if the PHY Recovered Clock device is present in the netlist
+ */
+bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw)
+{
+ if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_C827, NULL) &&
+ ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_E822_PHY, NULL))
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_is_clock_mux_in_netlist
+ * @hw: pointer to the hw struct
+ *
+ * Check if the Clock Multiplexer device is present in the netlist
+ */
+bool ice_is_clock_mux_in_netlist(struct ice_hw *hw)
+{
+ if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_MUX,
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_CLK_MUX,
+ NULL))
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_is_cgu_in_netlist - check for CGU presence
+ * @hw: pointer to the hw struct
+ *
+ * Check if the Clock Generation Unit (CGU) device is present in the netlist.
+ * Save the CGU part number in the hw structure for later use.
+ * Return:
+ * * true - cgu is present
+ * * false - cgu is not present
+ */
+bool ice_is_cgu_in_netlist(struct ice_hw *hw)
+{
+ if (!ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032,
+ NULL)) {
+ hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032;
+ return true;
+ } else if (!ice_find_netlist_node(hw,
+ ICE_AQC_LINK_TOPO_NODE_TYPE_CLK_CTRL,
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384,
+ NULL)) {
+ hw->cgu_part_number = ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384;
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_is_gps_in_netlist
+ * @hw: pointer to the hw struct
+ *
+ * Check if the GPS generic device is present in the netlist
+ */
+bool ice_is_gps_in_netlist(struct ice_hw *hw)
+{
+ if (ice_find_netlist_node(hw, ICE_AQC_LINK_TOPO_NODE_TYPE_GPS,
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_GEN_GPS, NULL))
+ return false;
+
+ return true;
+}
+
+/**
* ice_aq_list_caps - query function/device capabilities
* @hw: pointer to the HW struct
* @buf: a buffer to hold the capabilities
@@ -3869,6 +4096,34 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
return status;
}
+static enum ice_lut_size ice_lut_type_to_size(enum ice_lut_type type)
+{
+ switch (type) {
+ case ICE_LUT_VSI:
+ return ICE_LUT_VSI_SIZE;
+ case ICE_LUT_GLOBAL:
+ return ICE_LUT_GLOBAL_SIZE;
+ case ICE_LUT_PF:
+ return ICE_LUT_PF_SIZE;
+ }
+ WARN_ONCE(1, "incorrect type passed");
+ return ICE_LUT_VSI_SIZE;
+}
+
+static enum ice_aqc_lut_flags ice_lut_size_to_flag(enum ice_lut_size size)
+{
+ switch (size) {
+ case ICE_LUT_VSI_SIZE:
+ return ICE_AQC_LUT_SIZE_SMALL;
+ case ICE_LUT_GLOBAL_SIZE:
+ return ICE_AQC_LUT_SIZE_512;
+ case ICE_LUT_PF_SIZE:
+ return ICE_AQC_LUT_SIZE_2K;
+ }
+ WARN_ONCE(1, "incorrect size passed");
+ return 0;
+}
+
/**
* __ice_aq_get_set_rss_lut
* @hw: pointer to the hardware structure
@@ -3878,95 +4133,44 @@ ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
* Internal function to get (0x0B05) or set (0x0B03) RSS look up table
*/
static int
-__ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
-{
- u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
- struct ice_aqc_get_set_rss_lut *cmd_resp;
+__ice_aq_get_set_rss_lut(struct ice_hw *hw,
+ struct ice_aq_get_set_rss_lut_params *params, bool set)
+{
+ u16 opcode, vsi_id, vsi_handle = params->vsi_handle, glob_lut_idx = 0;
+ enum ice_lut_type lut_type = params->lut_type;
+ struct ice_aqc_get_set_rss_lut *desc_params;
+ enum ice_aqc_lut_flags flags;
+ enum ice_lut_size lut_size;
struct ice_aq_desc desc;
- int status;
- u8 *lut;
-
- if (!params)
- return -EINVAL;
+ u8 *lut = params->lut;
- vsi_handle = params->vsi_handle;
- lut = params->lut;
- if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
+ if (!lut || !ice_is_vsi_valid(hw, vsi_handle))
return -EINVAL;
- lut_size = params->lut_size;
- lut_type = params->lut_type;
- glob_lut_idx = params->global_lut_id;
- vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
-
- cmd_resp = &desc.params.get_set_rss_lut;
+ lut_size = ice_lut_type_to_size(lut_type);
+ if (lut_size > params->lut_size)
+ return -EINVAL;
+ else if (set && lut_size != params->lut_size)
+ return -EINVAL;
- if (set) {
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
+ opcode = set ? ice_aqc_opc_set_rss_lut : ice_aqc_opc_get_rss_lut;
+ ice_fill_dflt_direct_cmd_desc(&desc, opcode);
+ if (set)
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
- } else {
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
- }
- cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
- ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
- ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
- ICE_AQC_GSET_RSS_LUT_VSI_VALID);
-
- switch (lut_type) {
- case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
- case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
- case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
- flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
- ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
- break;
- default:
- status = -EINVAL;
- goto ice_aq_get_set_rss_lut_exit;
- }
+ desc_params = &desc.params.get_set_rss_lut;
+ vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
+ desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);
- if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
- flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
- ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
+ if (lut_type == ICE_LUT_GLOBAL)
+ glob_lut_idx = FIELD_PREP(ICE_AQC_LUT_GLOBAL_IDX,
+ params->global_lut_id);
- if (!set)
- goto ice_aq_get_set_rss_lut_send;
- } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
- if (!set)
- goto ice_aq_get_set_rss_lut_send;
- } else {
- goto ice_aq_get_set_rss_lut_send;
- }
+ flags = lut_type | glob_lut_idx | ice_lut_size_to_flag(lut_size);
+ desc_params->flags = cpu_to_le16(flags);
- /* LUT size is only valid for Global and PF table types */
- switch (lut_size) {
- case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
- break;
- case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
- flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
- ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
- ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
- break;
- case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
- if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
- flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
- ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
- ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
- break;
- }
- fallthrough;
- default:
- status = -EINVAL;
- goto ice_aq_get_set_rss_lut_exit;
- }
-
-ice_aq_get_set_rss_lut_send:
- cmd_resp->flags = cpu_to_le16(flags);
- status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
-
-ice_aq_get_set_rss_lut_exit:
- return status;
+ return ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
}
/**
@@ -4008,12 +4212,10 @@ static int
__ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
struct ice_aqc_get_set_rss_keys *key, bool set)
{
- struct ice_aqc_get_set_rss_key *cmd_resp;
+ struct ice_aqc_get_set_rss_key *desc_params;
u16 key_size = sizeof(*key);
struct ice_aq_desc desc;
- cmd_resp = &desc.params.get_set_rss_key;
-
if (set) {
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
@@ -4021,10 +4223,8 @@ __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
}
- cmd_resp->vsi_id = cpu_to_le16(((vsi_id <<
- ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
- ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
- ICE_AQC_GSET_RSS_KEY_VSI_VALID);
+ desc_params = &desc.params.get_set_rss_key;
+ desc_params->vsi_id = cpu_to_le16(vsi_id | ICE_AQC_RSS_VSI_VALID);
return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
}
@@ -4222,6 +4422,53 @@ do_aq:
}
/**
+ * ice_aq_cfg_lan_txq
+ * @hw: pointer to the hardware structure
+ * @buf: buffer for command
+ * @buf_size: size of buffer in bytes
+ * @num_qs: number of queues being configured
+ * @oldport: origination lport
+ * @newport: destination lport
+ * @cd: pointer to command details structure or NULL
+ *
+ * Move/Configure LAN Tx queue (0x0C32)
+ *
+ * There is a better AQ command for moving nodes, so this implementation
+ * only covers reconfiguring the node.
+ */
+int
+ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
+ u16 buf_size, u16 num_qs, u8 oldport, u8 newport,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_cfg_txqs *cmd;
+ struct ice_aq_desc desc;
+ int status;
+
+ cmd = &desc.params.cfg_txqs;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_cfg_txqs);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ if (!buf)
+ return -EINVAL;
+
+ cmd->cmd_type = ICE_AQC_Q_CFG_TC_CHNG;
+ cmd->num_qs = num_qs;
+ cmd->port_num_chng = (oldport & ICE_AQC_Q_CFG_SRC_PRT_M);
+ cmd->port_num_chng |= (newport << ICE_AQC_Q_CFG_DST_PRT_S) &
+ ICE_AQC_Q_CFG_DST_PRT_M;
+ cmd->time_out = (5 << ICE_AQC_Q_CFG_TIMEOUT_S) &
+ ICE_AQC_Q_CFG_TIMEOUT_M;
+ cmd->blocked_cgds = 0;
+
+ status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
+ if (status)
+ ice_debug(hw, ICE_DBG_SCHED, "Failed to reconfigure nodes %d\n",
+ hw->adminq.sq_last_status);
+ return status;
+}
+
+/**
* ice_aq_add_rdma_qsets
* @hw: pointer to the hardware structure
* @num_qset_grps: Number of RDMA Qset groups
@@ -4644,11 +4891,11 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
enum ice_disq_rst_src rst_src, u16 vmvf_num,
struct ice_sq_cd *cd)
{
- struct ice_aqc_dis_txq_item *qg_list;
+ DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
+ u16 i, buf_size = __struct_size(qg_list);
struct ice_q_ctx *q_ctx;
int status = -ENOENT;
struct ice_hw *hw;
- u16 i, buf_size;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
return -EIO;
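The kzalloc()/kfree() pair for the one-element queue group list is replaced
with DEFINE_FLEX(), which (roughly) declares an on-stack, zero-initialized
union large enough for the struct plus the requested number of flexible-array
elements, and __struct_size(), which recovers that compile-time size. The
generic shape of the pattern (a sketch, assuming the count is a compile-time
constant):

	/* One ice_aqc_dis_txq_item with room for a single q_id element
	 * lives on the stack: no allocation, no error path, no free.
	 */
	DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
	u16 buf_size = __struct_size(qg_list);
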
@@ -4666,11 +4913,6 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
return -EIO;
}
- buf_size = struct_size(qg_list, q_id, 1);
- qg_list = kzalloc(buf_size, GFP_KERNEL);
- if (!qg_list)
- return -ENOMEM;
-
mutex_lock(&pi->sched_lock);
for (i = 0; i < num_queues; i++) {
@@ -4700,9 +4942,9 @@ ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
break;
ice_free_sched_node(pi, node);
q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
+ q_ctx->q_teid = ICE_INVAL_TEID;
}
mutex_unlock(&pi->sched_lock);
- kfree(qg_list);
return status;
}
@@ -4871,10 +5113,10 @@ int
ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
u16 *q_id)
{
- struct ice_aqc_dis_txq_item *qg_list;
+ DEFINE_FLEX(struct ice_aqc_dis_txq_item, qg_list, q_id, 1);
+ u16 qg_size = __struct_size(qg_list);
struct ice_hw *hw;
int status = 0;
- u16 qg_size;
int i;
if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
@@ -4882,11 +5124,6 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
hw = pi->hw;
- qg_size = struct_size(qg_list, q_id, 1);
- qg_list = kzalloc(qg_size, GFP_KERNEL);
- if (!qg_list)
- return -ENOMEM;
-
mutex_lock(&pi->sched_lock);
for (i = 0; i < count; i++) {
@@ -4911,7 +5148,395 @@ ice_dis_vsi_rdma_qset(struct ice_port_info *pi, u16 count, u32 *qset_teid,
}
mutex_unlock(&pi->sched_lock);
- kfree(qg_list);
+ return status;
+}
+
+/**
+ * ice_aq_get_cgu_abilities - get cgu abilities
+ * @hw: pointer to the HW struct
+ * @abilities: CGU abilities
+ *
+ * Get CGU abilities (0x0C61)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_get_cgu_abilities(struct ice_hw *hw,
+ struct ice_aqc_get_cgu_abilities *abilities)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_abilities);
+ return ice_aq_send_cmd(hw, &desc, abilities, sizeof(*abilities), NULL);
+}
+
+/**
+ * ice_aq_set_input_pin_cfg - set input pin config
+ * @hw: pointer to the HW struct
+ * @input_idx: Input index
+ * @flags1: Input flags
+ * @flags2: Input flags
+ * @freq: Frequency in Hz
+ * @phase_delay: Delay in ps
+ *
+ * Set CGU input config (0x0C62)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2,
+ u32 freq, s32 phase_delay)
+{
+ struct ice_aqc_set_cgu_input_config *cmd;
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_input_config);
+ cmd = &desc.params.set_cgu_input_config;
+ cmd->input_idx = input_idx;
+ cmd->flags1 = flags1;
+ cmd->flags2 = flags2;
+ cmd->freq = cpu_to_le32(freq);
+ cmd->phase_delay = cpu_to_le32(phase_delay);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_aq_get_input_pin_cfg - get input pin config
+ * @hw: pointer to the HW struct
+ * @input_idx: Input index
+ * @status: Pin status
+ * @type: Pin type
+ * @flags1: Input flags
+ * @flags2: Input flags
+ * @freq: Frequency in Hz
+ * @phase_delay: Delay in ps
+ *
+ * Get CGU input config (0x0C63)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_get_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 *status, u8 *type,
+ u8 *flags1, u8 *flags2, u32 *freq, s32 *phase_delay)
+{
+ struct ice_aqc_get_cgu_input_config *cmd;
+ struct ice_aq_desc desc;
+ int ret;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_input_config);
+ cmd = &desc.params.get_cgu_input_config;
+ cmd->input_idx = input_idx;
+
+ ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!ret) {
+ if (status)
+ *status = cmd->status;
+ if (type)
+ *type = cmd->type;
+ if (flags1)
+ *flags1 = cmd->flags1;
+ if (flags2)
+ *flags2 = cmd->flags2;
+ if (freq)
+ *freq = le32_to_cpu(cmd->freq);
+ if (phase_delay)
+ *phase_delay = le32_to_cpu(cmd->phase_delay);
+ }
+
+ return ret;
+}
+
+/**
+ * ice_aq_set_output_pin_cfg - set output pin config
+ * @hw: pointer to the HW struct
+ * @output_idx: Output index
+ * @flags: Output flags
+ * @src_sel: Index of DPLL block
+ * @freq: Output frequency
+ * @phase_delay: Output phase compensation
+ *
+ * Set CGU output config (0x0C64)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags,
+ u8 src_sel, u32 freq, s32 phase_delay)
+{
+ struct ice_aqc_set_cgu_output_config *cmd;
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_output_config);
+ cmd = &desc.params.set_cgu_output_config;
+ cmd->output_idx = output_idx;
+ cmd->flags = flags;
+ cmd->src_sel = src_sel;
+ cmd->freq = cpu_to_le32(freq);
+ cmd->phase_delay = cpu_to_le32(phase_delay);
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_aq_get_output_pin_cfg - get output pin config
+ * @hw: pointer to the HW struct
+ * @output_idx: Output index
+ * @flags: Output flags
+ * @src_sel: Internal DPLL source
+ * @freq: Output frequency
+ * @src_freq: Source frequency
+ *
+ * Get CGU output config (0x0C65)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags,
+ u8 *src_sel, u32 *freq, u32 *src_freq)
+{
+ struct ice_aqc_get_cgu_output_config *cmd;
+ struct ice_aq_desc desc;
+ int ret;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_output_config);
+ cmd = &desc.params.get_cgu_output_config;
+ cmd->output_idx = output_idx;
+
+ ret = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!ret) {
+ if (flags)
+ *flags = cmd->flags;
+ if (src_sel)
+ *src_sel = cmd->src_sel;
+ if (freq)
+ *freq = le32_to_cpu(cmd->freq);
+ if (src_freq)
+ *src_freq = le32_to_cpu(cmd->src_freq);
+ }
+
+ return ret;
+}
+
+/**
+ * ice_aq_get_cgu_dpll_status - get dpll status
+ * @hw: pointer to the HW struct
+ * @dpll_num: DPLL index
+ * @ref_state: Reference clock state
+ * @config: current DPLL config
+ * @dpll_state: current DPLL state
+ * @phase_offset: Phase offset in ns
+ * @eec_mode: EEC_mode
+ *
+ * Get CGU DPLL status (0x0C66)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state,
+ u8 *dpll_state, u8 *config, s64 *phase_offset,
+ u8 *eec_mode)
+{
+ struct ice_aqc_get_cgu_dpll_status *cmd;
+ const s64 nsec_per_psec = 1000LL;
+ struct ice_aq_desc desc;
+ int status;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_dpll_status);
+ cmd = &desc.params.get_cgu_dpll_status;
+ cmd->dpll_num = dpll_num;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!status) {
+ *ref_state = cmd->ref_state;
+ *dpll_state = cmd->dpll_state;
+ *config = cmd->config;
+ *phase_offset = le32_to_cpu(cmd->phase_offset_h);
+ *phase_offset <<= 32;
+ *phase_offset += le32_to_cpu(cmd->phase_offset_l);
+ *phase_offset = div64_s64(sign_extend64(*phase_offset, 47),
+ nsec_per_psec);
+ *eec_mode = cmd->eec_mode;
+ }
+
+ return status;
+}
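The phase offset comes back as a 48-bit signed picosecond quantity split across two LE32 words; the helper stitches the halves together, sign-extends at bit 47, and scales to nanoseconds (note the local constant is named nsec_per_psec although it holds the number of picoseconds per nanosecond). The same arithmetic in isolation, with the kernel helpers spelled out:

	/* Standalone sketch of the 48-bit phase offset decode; host-order
	 * values for clarity (the driver applies le32_to_cpu() first).
	 */
	static s64 decode_phase_offset_ns(u32 hi, u32 lo)
	{
		u64 raw = ((u64)hi << 32) | lo;

		raw &= (1ULL << 48) - 1;		/* 48-bit field */
		if (raw & (1ULL << 47))			/* sign bit */
			raw |= ~((1ULL << 48) - 1);	/* sign-extend */

		return (s64)raw / 1000;			/* 1000 ps per ns */
	}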
+
+/**
+ * ice_aq_set_cgu_dpll_config - set dpll config
+ * @hw: pointer to the HW struct
+ * @dpll_num: DPLL index
+ * @ref_state: Reference clock state
+ * @config: DPLL config
+ * @eec_mode: EEC mode
+ *
+ * Set CGU DPLL config (0x0C67)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state,
+ u8 config, u8 eec_mode)
+{
+ struct ice_aqc_set_cgu_dpll_config *cmd;
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_dpll_config);
+ cmd = &desc.params.set_cgu_dpll_config;
+ cmd->dpll_num = dpll_num;
+ cmd->ref_state = ref_state;
+ cmd->config = config;
+ cmd->eec_mode = eec_mode;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_aq_set_cgu_ref_prio - set input reference priority
+ * @hw: pointer to the HW struct
+ * @dpll_num: DPLL index
+ * @ref_idx: Reference pin index
+ * @ref_priority: Reference input priority
+ *
+ * Set CGU reference priority (0x0C68)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
+ u8 ref_priority)
+{
+ struct ice_aqc_set_cgu_ref_prio *cmd;
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_cgu_ref_prio);
+ cmd = &desc.params.set_cgu_ref_prio;
+ cmd->dpll_num = dpll_num;
+ cmd->ref_idx = ref_idx;
+ cmd->ref_priority = ref_priority;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
+/**
+ * ice_aq_get_cgu_ref_prio - get input reference priority
+ * @hw: pointer to the HW struct
+ * @dpll_num: DPLL index
+ * @ref_idx: Reference pin index
+ * @ref_prio: Reference input priority
+ *
+ * Get CGU reference priority (0x0C69)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
+ u8 *ref_prio)
+{
+ struct ice_aqc_get_cgu_ref_prio *cmd;
+ struct ice_aq_desc desc;
+ int status;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_ref_prio);
+ cmd = &desc.params.get_cgu_ref_prio;
+ cmd->dpll_num = dpll_num;
+ cmd->ref_idx = ref_idx;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!status)
+ *ref_prio = cmd->ref_priority;
+
+ return status;
+}
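Set and get are symmetric here, so a write can be verified with a read-back. A hedged sketch (DPLL and reference indices invented; this assumes the common convention that a lower value means a more preferred reference):

	/* Illustrative only: make reference 3 the most preferred input of
	 * DPLL 0 and confirm the value stuck.
	 */
	static int example_prefer_ref(struct ice_hw *hw)
	{
		u8 prio;
		int err;

		err = ice_aq_set_cgu_ref_prio(hw, 0, 3, 0);
		if (err)
			return err;

		err = ice_aq_get_cgu_ref_prio(hw, 0, 3, &prio);
		if (err)
			return err;

		return prio == 0 ? 0 : -EIO;
	}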
+
+/**
+ * ice_aq_get_cgu_info - get cgu info
+ * @hw: pointer to the HW struct
+ * @cgu_id: CGU ID
+ * @cgu_cfg_ver: CGU config version
+ * @cgu_fw_ver: CGU firmware version
+ *
+ * Get CGU info (0x0C6A)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver,
+ u32 *cgu_fw_ver)
+{
+ struct ice_aqc_get_cgu_info *cmd;
+ struct ice_aq_desc desc;
+ int status;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_cgu_info);
+ cmd = &desc.params.get_cgu_info;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!status) {
+ *cgu_id = le32_to_cpu(cmd->cgu_id);
+ *cgu_cfg_ver = le32_to_cpu(cmd->cgu_cfg_ver);
+ *cgu_fw_ver = le32_to_cpu(cmd->cgu_fw_ver);
+ }
+
+ return status;
+}
+
+/**
+ * ice_aq_set_phy_rec_clk_out - set RCLK phy out
+ * @hw: pointer to the HW struct
+ * @phy_output: PHY reference clock output pin
+ * @enable: GPIO state to be applied
+ * @freq: PHY output frequency; on success updated with the value set by FW
+ *
+ * Set phy recovered clock as reference (0x0630)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable,
+ u32 *freq)
+{
+ struct ice_aqc_set_phy_rec_clk_out *cmd;
+ struct ice_aq_desc desc;
+ int status;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_rec_clk_out);
+ cmd = &desc.params.set_phy_rec_clk_out;
+ cmd->phy_output = phy_output;
+ cmd->port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT;
+ cmd->flags = enable & ICE_AQC_SET_PHY_REC_CLK_OUT_OUT_EN;
+ cmd->freq = cpu_to_le32(*freq);
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!status)
+ *freq = le32_to_cpu(cmd->freq);
+
+ return status;
+}
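Note that @freq is an in/out parameter: the requested frequency is sent down and, on success, overwritten with whatever firmware actually programmed. A hedged usage sketch (requested frequency invented):

	/* Illustrative only: enable the recovered clock on the current port
	 * and pick up the frequency firmware reports back.
	 */
	static int example_enable_rclk(struct ice_hw *hw)
	{
		u32 freq = 156250000;	/* request; FW may rewrite it */
		int err;

		err = ice_aq_set_phy_rec_clk_out(hw, 0, true, &freq);
		if (err)
			return err;

		/* freq now holds the firmware-confirmed frequency */
		return 0;
	}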
+
+/**
+ * ice_aq_get_phy_rec_clk_out - get phy recovered signal info
+ * @hw: pointer to the HW struct
+ * @phy_output: PHY reference clock output pin
+ * @port_num: Port number
+ * @flags: PHY flags
+ * @node_handle: PHY node handle of the output
+ *
+ * Get PHY recovered clock output info (0x0631)
+ * Return: 0 on success or negative value on failure.
+ */
+int
+ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num,
+ u8 *flags, u16 *node_handle)
+{
+ struct ice_aqc_get_phy_rec_clk_out *cmd;
+ struct ice_aq_desc desc;
+ int status;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_rec_clk_out);
+ cmd = &desc.params.get_phy_rec_clk_out;
+ cmd->phy_output = *phy_output;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (!status) {
+ *phy_output = cmd->phy_output;
+ if (port_num)
+ *port_num = cmd->port_num;
+ if (flags)
+ *flags = cmd->flags;
+ if (node_handle)
+ *node_handle = le16_to_cpu(cmd->node_handle);
+ }
+
return status;
}
@@ -5185,81 +5810,6 @@ ice_aq_write_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
}
/**
- * ice_aq_set_driver_param - Set driver parameter to share via firmware
- * @hw: pointer to the HW struct
- * @idx: parameter index to set
- * @value: the value to set the parameter to
- * @cd: pointer to command details structure or NULL
- *
- * Set the value of one of the software defined parameters. All PFs connected
- * to this device can read the value using ice_aq_get_driver_param.
- *
- * Note that firmware provides no synchronization or locking, and will not
- * save the parameter value during a device reset. It is expected that
- * a single PF will write the parameter value, while all other PFs will only
- * read it.
- */
-int
-ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
- u32 value, struct ice_sq_cd *cd)
-{
- struct ice_aqc_driver_shared_params *cmd;
- struct ice_aq_desc desc;
-
- if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
- return -EIO;
-
- cmd = &desc.params.drv_shared_params;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
-
- cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_SET;
- cmd->param_indx = idx;
- cmd->param_val = cpu_to_le32(value);
-
- return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
-}
-
-/**
- * ice_aq_get_driver_param - Get driver parameter shared via firmware
- * @hw: pointer to the HW struct
- * @idx: parameter index to set
- * @value: storage to return the shared parameter
- * @cd: pointer to command details structure or NULL
- *
- * Get the value of one of the software defined parameters.
- *
- * Note that firmware provides no synchronization or locking. It is expected
- * that only a single PF will write a given parameter.
- */
-int
-ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
- u32 *value, struct ice_sq_cd *cd)
-{
- struct ice_aqc_driver_shared_params *cmd;
- struct ice_aq_desc desc;
- int status;
-
- if (idx >= ICE_AQC_DRIVER_PARAM_MAX)
- return -EIO;
-
- cmd = &desc.params.drv_shared_params;
-
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_shared_params);
-
- cmd->set_or_get_op = ICE_AQC_DRIVER_PARAM_GET;
- cmd->param_indx = idx;
-
- status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
- if (status)
- return status;
-
- *value = le32_to_cpu(cmd->param_val);
-
- return 0;
-}
-
-/**
* ice_aq_set_gpio
* @hw: pointer to the hw struct
* @gpio_ctrl_handle: GPIO controller node handle
@@ -5560,6 +6110,7 @@ static const u32 ice_aq_to_link_speed[] = {
SPEED_40000,
SPEED_50000,
SPEED_100000, /* BIT(10) */
+ SPEED_200000,
};
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 81961a7d6598..31fdcac33986 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -38,10 +38,9 @@ int
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res);
int
ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res);
-int
-ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
- struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
- enum ice_adminq_opc opc, struct ice_sq_cd *cd);
+int ice_aq_alloc_free_res(struct ice_hw *hw,
+ struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
+ enum ice_adminq_opc opc);
bool ice_is_sbq_supported(struct ice_hw *hw);
struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw);
int
@@ -93,6 +92,14 @@ int
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
+bool ice_is_pf_c827(struct ice_hw *hw);
+bool ice_is_phy_rclk_in_netlist(struct ice_hw *hw);
+bool ice_is_clock_mux_in_netlist(struct ice_hw *hw);
+bool ice_is_cgu_in_netlist(struct ice_hw *hw);
+bool ice_is_gps_in_netlist(struct ice_hw *hw);
+int
+ice_aq_get_netlist_node(struct ice_hw *hw, struct ice_aqc_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle);
int
ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
enum ice_adminq_opc opc, struct ice_sq_cd *cd);
@@ -186,12 +193,54 @@ int
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
struct ice_sq_cd *cd);
+int
+ice_aq_cfg_lan_txq(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *buf,
+ u16 buf_size, u16 num_qs, u8 oldport, u8 newport,
+ struct ice_sq_cd *cd);
int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in);
+int
+ice_aq_get_cgu_abilities(struct ice_hw *hw,
+ struct ice_aqc_get_cgu_abilities *abilities);
+int
+ice_aq_set_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 flags1, u8 flags2,
+ u32 freq, s32 phase_delay);
+int
+ice_aq_get_input_pin_cfg(struct ice_hw *hw, u8 input_idx, u8 *status, u8 *type,
+ u8 *flags1, u8 *flags2, u32 *freq, s32 *phase_delay);
+int
+ice_aq_set_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 flags,
+ u8 src_sel, u32 freq, s32 phase_delay);
+int
+ice_aq_get_output_pin_cfg(struct ice_hw *hw, u8 output_idx, u8 *flags,
+ u8 *src_sel, u32 *freq, u32 *src_freq);
+int
+ice_aq_get_cgu_dpll_status(struct ice_hw *hw, u8 dpll_num, u8 *ref_state,
+ u8 *dpll_state, u8 *config, s64 *phase_offset,
+ u8 *eec_mode);
+int
+ice_aq_set_cgu_dpll_config(struct ice_hw *hw, u8 dpll_num, u8 ref_state,
+ u8 config, u8 eec_mode);
+int
+ice_aq_set_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
+ u8 ref_priority);
+int
+ice_aq_get_cgu_ref_prio(struct ice_hw *hw, u8 dpll_num, u8 ref_idx,
+ u8 *ref_prio);
+int
+ice_aq_get_cgu_info(struct ice_hw *hw, u32 *cgu_id, u32 *cgu_cfg_ver,
+ u32 *cgu_fw_ver);
+
+int
+ice_aq_set_phy_rec_clk_out(struct ice_hw *hw, u8 phy_output, bool enable,
+ u32 *freq);
+int
+ice_aq_get_phy_rec_clk_out(struct ice_hw *hw, u8 *phy_output, u8 *port_num,
+ u8 *flags, u16 *node_handle);
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
@@ -204,12 +253,6 @@ int
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
int
-ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
- u32 value, struct ice_sq_cd *cd);
-int
-ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
- u32 *value, struct ice_sq_cd *cd);
-int
ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
struct ice_sq_cd *cd);
int
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index 3eb01731e496..e1fbc6de452d 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -70,6 +70,11 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return -EINVAL;
+ if (pf->lag && pf->lag->bonded) {
+ netdev_err(netdev, "DCB changes not allowed when in a bond\n");
+ return -EINVAL;
+ }
+
new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
mutex_lock(&pf->tc_mutex);
@@ -170,6 +175,11 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
if (mode == pf->dcbx_cap)
return ICE_DCB_NO_HW_CHG;
+ if (pf->lag && pf->lag->bonded) {
+ netdev_err(netdev, "DCB changes not allowed when in a bond\n");
+ return ICE_DCB_NO_HW_CHG;
+ }
+
qos_cfg = &pf->hw.port_info->qos_cfg;
/* DSCP configuration is not DCBx negotiated */
@@ -261,6 +271,11 @@ static int ice_dcbnl_setpfc(struct net_device *netdev, struct ieee_pfc *pfc)
!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
return -EINVAL;
+ if (pf->lag && pf->lag->bonded) {
+ netdev_err(netdev, "DCB changes not allowed when in a bond\n");
+ return -EINVAL;
+ }
+
mutex_lock(&pf->tc_mutex);
new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
@@ -323,6 +338,11 @@ static void ice_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio, u8 set)
if (prio >= ICE_MAX_USER_PRIORITY)
return;
+ if (pf->lag && pf->lag->bonded) {
+ netdev_err(netdev, "DCB changes not allowed when in a bond\n");
+ return;
+ }
+
new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
new_cfg->pfc.pfccap = pf->hw.func_caps.common_cap.maxtc;
@@ -379,6 +399,11 @@ static u8 ice_dcbnl_setstate(struct net_device *netdev, u8 state)
!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return ICE_DCB_NO_HW_CHG;
+ if (pf->lag && pf->lag->bonded) {
+ netdev_err(netdev, "DCB changes not allowed when in a bond\n");
+ return ICE_DCB_NO_HW_CHG;
+ }
+
/* Nothing to do */
if (!!state == test_bit(ICE_FLAG_DCB_ENA, pf->flags))
return ICE_DCB_NO_HW_CHG;
@@ -451,6 +476,11 @@ ice_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
if (tc >= ICE_MAX_TRAFFIC_CLASS)
return;
+ if (pf->lag && pf->lag->bonded) {
+ netdev_err(netdev, "DCB changes not allowed when in a bond\n");
+ return;
+ }
+
new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
/* prio_type, bwg_id and bw_pct per UP are not supported */
@@ -505,6 +535,11 @@ ice_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int pgid, u8 bw_pct)
if (pgid >= ICE_MAX_TRAFFIC_CLASS)
return;
+ if (pf->lag && pf->lag->bonded) {
+ netdev_err(netdev, "DCB changes not allowed when in a bond\n");
+ return;
+ }
+
new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
new_cfg->etscfg.tcbwtable[pgid] = bw_pct;
@@ -725,6 +760,11 @@ static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app)
return -EINVAL;
}
+ if (pf->lag && pf->lag->bonded) {
+ netdev_err(netdev, "DCB changes not allowed when in a bond\n");
+ return -EINVAL;
+ }
+
max_tc = pf->hw.func_caps.common_cap.maxtc;
if (app->priority >= max_tc) {
netdev_err(netdev, "TC %d out of range, max TC %d\n",
@@ -836,6 +876,11 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
return -EINVAL;
}
+ if (pf->lag && pf->lag->bonded) {
+ netdev_err(netdev, "DCB changes not allowed when in a bond\n");
+ return -EINVAL;
+ }
+
mutex_lock(&pf->tc_mutex);
old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
@@ -937,6 +982,11 @@ static u8 ice_dcbnl_cee_set_all(struct net_device *netdev)
!(pf->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return ICE_DCB_NO_HW_CHG;
+ if (pf->lag && pf->lag->bonded) {
+ netdev_err(netdev, "DCB changes not allowed when in a bond\n");
+ return ICE_DCB_NO_HW_CHG;
+ }
+
new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
mutex_lock(&pf->tc_mutex);
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c
index d71ed210f9c4..8b7504a9df31 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -30,7 +30,7 @@ static const struct ice_tunnel_type_scan tnls[] = {
* Verifies various attributes of the package file, including length, format
* version, and the requirement of at least one segment.
*/
-enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
+static enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
{
u32 seg_count;
u32 i;
@@ -118,7 +118,7 @@ static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
*
* This helper function validates a buffer's header.
*/
-struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
+static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
{
struct ice_buf_hdr *hdr;
u16 section_count;
@@ -1153,23 +1153,168 @@ static void ice_release_global_cfg_lock(struct ice_hw *hw)
}
/**
- * ice_dwnld_cfg_bufs
+ * ice_aq_download_pkg
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package buffer to transfer
+ * @buf_size: the size of the package buffer
+ * @last_buf: last buffer indicator
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cd: pointer to command details structure or NULL
+ *
+ * Download Package (0x0C40)
+ */
+static int
+ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, bool last_buf, u32 *error_offset,
+ u32 *error_info, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_download_pkg *cmd;
+ struct ice_aq_desc desc;
+ int status;
+
+ if (error_offset)
+ *error_offset = 0;
+ if (error_info)
+ *error_info = 0;
+
+ cmd = &desc.params.download_pkg;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ if (last_buf)
+ cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
+
+ status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+ if (status == -EIO) {
+ /* Read error from buffer only when the FW returned an error */
+ struct ice_aqc_download_pkg_resp *resp;
+
+ resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
+ if (error_offset)
+ *error_offset = le32_to_cpu(resp->error_offset);
+ if (error_info)
+ *error_info = le32_to_cpu(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
+ * ice_get_pkg_seg_by_idx
+ * @pkg_hdr: pointer to the package header to be searched
+ * @idx: index of segment
+ */
+static struct ice_generic_seg_hdr *
+ice_get_pkg_seg_by_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx)
+{
+ if (idx < le32_to_cpu(pkg_hdr->seg_count))
+ return (struct ice_generic_seg_hdr *)
+ ((u8 *)pkg_hdr +
+ le32_to_cpu(pkg_hdr->seg_offset[idx]));
+
+ return NULL;
+}
+
+/**
+ * ice_is_signing_seg_at_idx - determine if segment is a signing segment
+ * @pkg_hdr: pointer to package header
+ * @idx: segment index
+ */
+static bool ice_is_signing_seg_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx)
+{
+ struct ice_generic_seg_hdr *seg;
+
+ seg = ice_get_pkg_seg_by_idx(pkg_hdr, idx);
+ if (!seg)
+ return false;
+
+ return le32_to_cpu(seg->seg_type) == SEGMENT_TYPE_SIGNING;
+}
+
+/**
+ * ice_is_signing_seg_type_at_idx
+ * @pkg_hdr: pointer to package header
+ * @idx: segment index
+ * @seg_id: segment id that is expected
+ * @sign_type: signing type
+ *
+ * Determine if a segment is a signing segment of the correct type
+ */
+static bool
+ice_is_signing_seg_type_at_idx(struct ice_pkg_hdr *pkg_hdr, u32 idx,
+ u32 seg_id, u32 sign_type)
+{
+ struct ice_sign_seg *seg;
+
+ if (!ice_is_signing_seg_at_idx(pkg_hdr, idx))
+ return false;
+
+ seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx);
+
+ if (seg && le32_to_cpu(seg->seg_id) == seg_id &&
+ le32_to_cpu(seg->sign_type) == sign_type)
+ return true;
+
+ return false;
+}
+
+/**
+ * ice_is_buffer_metadata - determine if package buffer is a metadata buffer
+ * @buf: pointer to buffer header
+ */
+static bool ice_is_buffer_metadata(struct ice_buf_hdr *buf)
+{
+ if (le32_to_cpu(buf->section_entry[0].type) & ICE_METADATA_BUF)
+ return true;
+
+ return false;
+}
+
+/**
+ * ice_is_last_download_buffer
+ * @buf: pointer to current buffer header
+ * @idx: index of the buffer in the current sequence
+ * @count: the buffer count in the current sequence
+ *
+ * Note: when @buf is not the final buffer in the sequence this routine
+ * dereferences the buffer that follows it, so @buf must point within the
+ * full buffer array
+ */
+static bool
+ice_is_last_download_buffer(struct ice_buf_hdr *buf, u32 idx, u32 count)
+{
+ struct ice_buf *next_buf;
+
+ if ((idx + 1) == count)
+ return true;
+
+ /* A set metadata flag in the next buffer will signal that the current
+ * buffer will be the last buffer downloaded
+ */
+ next_buf = ((struct ice_buf *)buf) + 1;
+
+ return ice_is_buffer_metadata((struct ice_buf_hdr *)next_buf);
+}
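This "last" decision is what drives the ICE_AQC_DOWNLOAD_PKG_LAST_BUF flag in ice_aq_download_pkg(): a trailing metadata buffer marks the preceding config buffer as the final one to transfer. Conceptually:

	/* Buffer array with count = 4 and a trailing metadata buffer:
	 *
	 *   idx 0: config    -> last = false
	 *   idx 1: config    -> last = false
	 *   idx 2: config    -> last = true (buffer 3 has the metadata flag)
	 *   idx 3: metadata
	 */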
+
+/**
+ * ice_dwnld_cfg_bufs_no_lock
* @hw: pointer to the hardware structure
* @bufs: pointer to an array of buffers
- * @count: the number of buffers in the array
+ * @start: buffer index of first buffer to download
+ * @count: the number of buffers to download
+ * @indicate_last: if true, then set last buffer flag on last buffer download
*
- * Obtains global config lock and downloads the package configuration buffers
- * to the firmware. Metadata buffers are skipped, and the first metadata buffer
- * found indicates that the rest of the buffers are all metadata buffers.
+ * Downloads package configuration buffers to the firmware. Metadata buffers
+ * are skipped, and the first metadata buffer found indicates that the rest
+ * of the buffers are all metadata buffers.
*/
-static enum ice_ddp_state ice_dwnld_cfg_bufs(struct ice_hw *hw,
- struct ice_buf *bufs, u32 count)
+static enum ice_ddp_state
+ice_dwnld_cfg_bufs_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 start,
+ u32 count, bool indicate_last)
{
enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
struct ice_buf_hdr *bh;
enum ice_aq_err err;
u32 offset, info, i;
- int status;
if (!bufs || !count)
return ICE_DDP_PKG_ERR;
@@ -1178,43 +1323,25 @@ static enum ice_ddp_state ice_dwnld_cfg_bufs(struct ice_hw *hw,
* then there are no buffers to be downloaded, and the operation is
* considered a success.
*/
- bh = (struct ice_buf_hdr *)bufs;
+ bh = (struct ice_buf_hdr *)(bufs + start);
if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
return ICE_DDP_PKG_SUCCESS;
- status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
- if (status) {
- if (status == -EALREADY)
- return ICE_DDP_PKG_ALREADY_LOADED;
- return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
- }
-
for (i = 0; i < count; i++) {
- bool last = ((i + 1) == count);
-
- if (!last) {
- /* check next buffer for metadata flag */
- bh = (struct ice_buf_hdr *)(bufs + i + 1);
+ bool last = false;
+ int status;
- /* A set metadata flag in the next buffer will signal
- * that the current buffer will be the last buffer
- * downloaded
- */
- if (le16_to_cpu(bh->section_count))
- if (le32_to_cpu(bh->section_entry[0].type) &
- ICE_METADATA_BUF)
- last = true;
- }
+ bh = (struct ice_buf_hdr *)(bufs + start + i);
- bh = (struct ice_buf_hdr *)(bufs + i);
+ if (indicate_last)
+ last = ice_is_last_download_buffer(bh, i, count);
status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
&offset, &info, NULL);
/* Save AQ status from download package */
if (status) {
- ice_debug(hw, ICE_DBG_PKG,
- "Pkg download failed: err %d off %d inf %d\n",
+ ice_debug(hw, ICE_DBG_PKG, "Pkg download failed: err %d off %d inf %d\n",
status, offset, info);
err = hw->adminq.sq_last_status;
state = ice_map_aq_err_to_ddp_state(err);
@@ -1225,50 +1352,231 @@ static enum ice_ddp_state ice_dwnld_cfg_bufs(struct ice_hw *hw,
break;
}
- if (!status) {
- status = ice_set_vlan_mode(hw);
- if (status)
- ice_debug(hw, ICE_DBG_PKG,
- "Failed to set VLAN mode: err %d\n", status);
+ return state;
+}
+
+/**
+ * ice_download_pkg_sig_seg - download a signature segment
+ * @hw: pointer to the hardware structure
+ * @seg: pointer to signature segment
+ */
+static enum ice_ddp_state
+ice_download_pkg_sig_seg(struct ice_hw *hw, struct ice_sign_seg *seg)
+{
+ return ice_dwnld_cfg_bufs_no_lock(hw, seg->buf_tbl.buf_array, 0,
+ le32_to_cpu(seg->buf_tbl.buf_count),
+ false);
+}
+
+/**
+ * ice_download_pkg_config_seg - download a config segment
+ * @hw: pointer to the hardware structure
+ * @pkg_hdr: pointer to package header
+ * @idx: segment index
+ * @start: starting buffer
+ * @count: buffer count
+ *
+ * Note: idx must reference an ICE segment
+ */
+static enum ice_ddp_state
+ice_download_pkg_config_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
+ u32 idx, u32 start, u32 count)
+{
+ struct ice_buf_table *bufs;
+ struct ice_seg *seg;
+ u32 buf_count;
+
+ seg = (struct ice_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx);
+ if (!seg)
+ return ICE_DDP_PKG_ERR;
+
+ bufs = ice_find_buf_table(seg);
+ buf_count = le32_to_cpu(bufs->buf_count);
+
+ if (start >= buf_count || start + count > buf_count)
+ return ICE_DDP_PKG_ERR;
+
+ return ice_dwnld_cfg_bufs_no_lock(hw, bufs->buf_array, start, count,
+ true);
+}
+
+/**
+ * ice_dwnld_sign_and_cfg_segs - download a signing segment and config segment
+ * @hw: pointer to the hardware structure
+ * @pkg_hdr: pointer to package header
+ * @idx: segment index (must be a signature segment)
+ *
+ * Note: idx must reference a signature segment
+ */
+static enum ice_ddp_state
+ice_dwnld_sign_and_cfg_segs(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
+ u32 idx)
+{
+ enum ice_ddp_state state;
+ struct ice_sign_seg *seg;
+ u32 conf_idx;
+ u32 start;
+ u32 count;
+
+ seg = (struct ice_sign_seg *)ice_get_pkg_seg_by_idx(pkg_hdr, idx);
+ if (!seg) {
+ state = ICE_DDP_PKG_ERR;
+ goto exit;
}
+ conf_idx = le32_to_cpu(seg->signed_seg_idx);
+ start = le32_to_cpu(seg->signed_buf_start);
+ count = le32_to_cpu(seg->signed_buf_count);
+
+ state = ice_download_pkg_sig_seg(hw, seg);
+ if (state)
+ goto exit;
+
+ state = ice_download_pkg_config_seg(hw, pkg_hdr, conf_idx, start,
+ count);
+
+exit:
+ return state;
+}
+
+/**
+ * ice_match_signing_seg - determine if a matching signing segment exists
+ * @pkg_hdr: pointer to package header
+ * @seg_id: segment id that is expected
+ * @sign_type: signing type
+ */
+static bool
+ice_match_signing_seg(struct ice_pkg_hdr *pkg_hdr, u32 seg_id, u32 sign_type)
+{
+ u32 i;
+
+ for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
+ if (ice_is_signing_seg_type_at_idx(pkg_hdr, i, seg_id,
+ sign_type))
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_post_dwnld_pkg_actions - perform post download package actions
+ * @hw: pointer to the hardware structure
+ */
+static enum ice_ddp_state
+ice_post_dwnld_pkg_actions(struct ice_hw *hw)
+{
+ int status;
+
+ status = ice_set_vlan_mode(hw);
+ if (status) {
+ ice_debug(hw, ICE_DBG_PKG, "Failed to set VLAN mode: err %d\n",
+ status);
+ return ICE_DDP_PKG_ERR;
+ }
+
+ return ICE_DDP_PKG_SUCCESS;
+}
+
+/**
+ * ice_download_pkg_with_sig_seg
+ * @hw: pointer to the hardware structure
+ * @pkg_hdr: pointer to package header
+ *
+ * Handles the download of a complete package.
+ */
+static enum ice_ddp_state
+ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
+{
+ enum ice_aq_err aq_err = hw->adminq.sq_last_status;
+ enum ice_ddp_state state = ICE_DDP_PKG_ERR;
+ int status;
+ u32 i;
+
+ ice_debug(hw, ICE_DBG_INIT, "Segment ID %d\n", hw->pkg_seg_id);
+ ice_debug(hw, ICE_DBG_INIT, "Signature type %d\n", hw->pkg_sign_type);
+
+ status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
+ if (status) {
+ if (status == -EALREADY)
+ state = ICE_DDP_PKG_ALREADY_LOADED;
+ else
+ state = ice_map_aq_err_to_ddp_state(aq_err);
+ return state;
+ }
+
+ for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
+ if (!ice_is_signing_seg_type_at_idx(pkg_hdr, i, hw->pkg_seg_id,
+ hw->pkg_sign_type))
+ continue;
+
+ state = ice_dwnld_sign_and_cfg_segs(hw, pkg_hdr, i);
+ if (state)
+ break;
+ }
+
+ if (!state)
+ state = ice_post_dwnld_pkg_actions(hw);
+
ice_release_global_cfg_lock(hw);
return state;
}
/**
- * ice_aq_get_pkg_info_list
+ * ice_dwnld_cfg_bufs
* @hw: pointer to the hardware structure
- * @pkg_info: the buffer which will receive the information list
- * @buf_size: the size of the pkg_info information buffer
- * @cd: pointer to command details structure or NULL
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
*
- * Get Package Info List (0x0C43)
+ * Obtains global config lock and downloads the package configuration buffers
+ * to the firmware.
*/
-static int ice_aq_get_pkg_info_list(struct ice_hw *hw,
- struct ice_aqc_get_pkg_info_resp *pkg_info,
- u16 buf_size, struct ice_sq_cd *cd)
+static enum ice_ddp_state
+ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
- struct ice_aq_desc desc;
+ enum ice_ddp_state state;
+ struct ice_buf_hdr *bh;
+ int status;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
+ if (!bufs || !count)
+ return ICE_DDP_PKG_ERR;
- return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
+ /* If the first buffer's first section has its metadata bit set
+ * then there are no buffers to be downloaded, and the operation is
+ * considered a success.
+ */
+ bh = (struct ice_buf_hdr *)bufs;
+ if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
+ return ICE_DDP_PKG_SUCCESS;
+
+ status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
+ if (status) {
+ if (status == -EALREADY)
+ return ICE_DDP_PKG_ALREADY_LOADED;
+ return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
+ }
+
+ state = ice_dwnld_cfg_bufs_no_lock(hw, bufs, 0, count, true);
+ if (!state)
+ state = ice_post_dwnld_pkg_actions(hw);
+
+ ice_release_global_cfg_lock(hw);
+
+ return state;
}
/**
- * ice_download_pkg
+ * ice_download_pkg_without_sig_seg
* @hw: pointer to the hardware structure
* @ice_seg: pointer to the segment of the package to be downloaded
*
- * Handles the download of a complete package.
+ * Handles the download of a complete package without signature segment.
*/
-static enum ice_ddp_state ice_download_pkg(struct ice_hw *hw,
- struct ice_seg *ice_seg)
+static enum ice_ddp_state
+ice_download_pkg_without_sig_seg(struct ice_hw *hw, struct ice_seg *ice_seg)
{
struct ice_buf_table *ice_buf_tbl;
- int status;
ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
ice_seg->hdr.seg_format_ver.major,
@@ -1285,79 +1593,52 @@ static enum ice_ddp_state ice_download_pkg(struct ice_hw *hw,
ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
le32_to_cpu(ice_buf_tbl->buf_count));
- status = ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
- le32_to_cpu(ice_buf_tbl->buf_count));
-
- ice_post_pkg_dwnld_vlan_mode_cfg(hw);
-
- return status;
+ return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
+ le32_to_cpu(ice_buf_tbl->buf_count));
}
/**
- * ice_aq_download_pkg
+ * ice_download_pkg
* @hw: pointer to the hardware structure
- * @pkg_buf: the package buffer to transfer
- * @buf_size: the size of the package buffer
- * @last_buf: last buffer indicator
- * @error_offset: returns error offset
- * @error_info: returns error information
- * @cd: pointer to command details structure or NULL
+ * @pkg_hdr: pointer to package header
+ * @ice_seg: pointer to the segment of the package to be downloaded
*
- * Download Package (0x0C40)
+ * Handles the download of a complete package.
*/
-int ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
- u16 buf_size, bool last_buf, u32 *error_offset,
- u32 *error_info, struct ice_sq_cd *cd)
+static enum ice_ddp_state
+ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
+ struct ice_seg *ice_seg)
{
- struct ice_aqc_download_pkg *cmd;
- struct ice_aq_desc desc;
- int status;
-
- if (error_offset)
- *error_offset = 0;
- if (error_info)
- *error_info = 0;
-
- cmd = &desc.params.download_pkg;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
-
- if (last_buf)
- cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
+ enum ice_ddp_state state;
- status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
- if (status == -EIO) {
- /* Read error from buffer only when the FW returned an error */
- struct ice_aqc_download_pkg_resp *resp;
+ if (hw->pkg_has_signing_seg)
+ state = ice_download_pkg_with_sig_seg(hw, pkg_hdr);
+ else
+ state = ice_download_pkg_without_sig_seg(hw, ice_seg);
- resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
- if (error_offset)
- *error_offset = le32_to_cpu(resp->error_offset);
- if (error_info)
- *error_info = le32_to_cpu(resp->error_info);
- }
+ ice_post_pkg_dwnld_vlan_mode_cfg(hw);
- return status;
+ return state;
}
/**
- * ice_aq_upload_section
+ * ice_aq_get_pkg_info_list
* @hw: pointer to the hardware structure
- * @pkg_buf: the package buffer which will receive the section
- * @buf_size: the size of the package buffer
+ * @pkg_info: the buffer which will receive the information list
+ * @buf_size: the size of the pkg_info information buffer
* @cd: pointer to command details structure or NULL
*
- * Upload Section (0x0C41)
+ * Get Package Info List (0x0C43)
*/
-int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
- u16 buf_size, struct ice_sq_cd *cd)
+static int ice_aq_get_pkg_info_list(struct ice_hw *hw,
+ struct ice_aqc_get_pkg_info_resp *pkg_info,
+ u16 buf_size, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
- desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
- return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+ return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
}
/**
@@ -1408,6 +1689,26 @@ static int ice_aq_update_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
}
/**
+ * ice_aq_upload_section
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package buffer which will receive the section
+ * @buf_size: the size of the package buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Upload Section (0x0C41)
+ */
+int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_upload_section);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ return ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+}
+
+/**
* ice_update_pkg_no_lock
* @hw: pointer to the hardware structure
* @bufs: pointer to an array of buffers
@@ -1470,8 +1771,9 @@ int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
* success it returns a pointer to the segment header, otherwise it will
* return NULL.
*/
-struct ice_generic_seg_hdr *ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
- struct ice_pkg_hdr *pkg_hdr)
+static struct ice_generic_seg_hdr *
+ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
+ struct ice_pkg_hdr *pkg_hdr)
{
u32 i;
@@ -1496,6 +1798,73 @@ struct ice_generic_seg_hdr *ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
}
/**
+ * ice_has_signing_seg - determine if package has a signing segment
+ * @hw: pointer to the hardware structure
+ * @pkg_hdr: pointer to the driver's package hdr
+ */
+static bool ice_has_signing_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
+{
+ struct ice_generic_seg_hdr *seg_hdr;
+
+ seg_hdr = (struct ice_generic_seg_hdr *)
+ ice_find_seg_in_pkg(hw, SEGMENT_TYPE_SIGNING, pkg_hdr);
+
+ return seg_hdr ? true : false;
+}
+
+/**
+ * ice_get_pkg_segment_id - get correct package segment id, based on device
+ * @mac_type: MAC type of the device
+ */
+static u32 ice_get_pkg_segment_id(enum ice_mac_type mac_type)
+{
+ u32 seg_id;
+
+ switch (mac_type) {
+ case ICE_MAC_E830:
+ seg_id = SEGMENT_TYPE_ICE_E830;
+ break;
+ case ICE_MAC_GENERIC:
+ default:
+ seg_id = SEGMENT_TYPE_ICE_E810;
+ break;
+ }
+
+ return seg_id;
+}
+
+/**
+ * ice_get_pkg_sign_type - get package segment sign type, based on device
+ * @mac_type: MAC type of the device
+ */
+static u32 ice_get_pkg_sign_type(enum ice_mac_type mac_type)
+{
+ u32 sign_type;
+
+ switch (mac_type) {
+ case ICE_MAC_E830:
+ sign_type = SEGMENT_SIGN_TYPE_RSA3K_SBB;
+ break;
+ case ICE_MAC_GENERIC:
+ default:
+ sign_type = SEGMENT_SIGN_TYPE_RSA2K;
+ break;
+ }
+
+ return sign_type;
+}
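Together the two lookups pin down what ice_init_pkg() will later demand of the package: a signing segment whose id and signature type match the device generation. Summarized from the switches above:

	/* MAC type           segment id              signature type
	 *
	 * ICE_MAC_E830    -> SEGMENT_TYPE_ICE_E830   SEGMENT_SIGN_TYPE_RSA3K_SBB
	 * ICE_MAC_GENERIC -> SEGMENT_TYPE_ICE_E810   SEGMENT_SIGN_TYPE_RSA2K
	 * (any other type falls through to the E810/RSA2K pairing)
	 */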
+
+/**
+ * ice_get_signing_req - get correct package requirements, based on device
+ * @hw: pointer to the hardware structure
+ */
+static void ice_get_signing_req(struct ice_hw *hw)
+{
+ hw->pkg_seg_id = ice_get_pkg_segment_id(hw->mac_type);
+ hw->pkg_sign_type = ice_get_pkg_sign_type(hw->mac_type);
+}
+
+/**
* ice_init_pkg_info
* @hw: pointer to the hardware structure
* @pkg_hdr: pointer to the driver's package hdr
@@ -1510,7 +1879,14 @@ static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw,
if (!pkg_hdr)
return ICE_DDP_PKG_ERR;
- seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
+ hw->pkg_has_signing_seg = ice_has_signing_seg(hw, pkg_hdr);
+ ice_get_signing_req(hw);
+
+ ice_debug(hw, ICE_DBG_INIT, "Pkg using segment id: 0x%08X\n",
+ hw->pkg_seg_id);
+
+ seg_hdr = (struct ice_generic_seg_hdr *)
+ ice_find_seg_in_pkg(hw, hw->pkg_seg_id, pkg_hdr);
if (seg_hdr) {
struct ice_meta_sect *meta;
struct ice_pkg_enum state;
@@ -1558,21 +1934,14 @@ static enum ice_ddp_state ice_init_pkg_info(struct ice_hw *hw,
*/
static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
{
- enum ice_ddp_state state = ICE_DDP_PKG_SUCCESS;
- struct ice_aqc_get_pkg_info_resp *pkg_info;
- u16 size;
+ DEFINE_FLEX(struct ice_aqc_get_pkg_info_resp, pkg_info, pkg_info,
+ ICE_PKG_CNT);
+ u16 size = __struct_size(pkg_info);
u32 i;
- size = struct_size(pkg_info, pkg_info, ICE_PKG_CNT);
- pkg_info = kzalloc(size, GFP_KERNEL);
- if (!pkg_info)
+ if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL))
return ICE_DDP_PKG_ERR;
- if (ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL)) {
- state = ICE_DDP_PKG_ERR;
- goto init_pkg_free_alloc;
- }
-
for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
#define ICE_PKG_FLAG_COUNT 4
char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
@@ -1602,10 +1971,7 @@ static enum ice_ddp_state ice_get_pkg_info(struct ice_hw *hw)
pkg_info->pkg_info[i].name, flags);
}
-init_pkg_free_alloc:
- kfree(pkg_info);
-
- return state;
+ return ICE_DDP_PKG_SUCCESS;
}
/**
@@ -1620,9 +1986,10 @@ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw,
struct ice_pkg_hdr *ospkg,
struct ice_seg **seg)
{
- struct ice_aqc_get_pkg_info_resp *pkg;
+ DEFINE_FLEX(struct ice_aqc_get_pkg_info_resp, pkg, pkg_info,
+ ICE_PKG_CNT);
+ u16 size = __struct_size(pkg);
enum ice_ddp_state state;
- u16 size;
u32 i;
/* Check package version compatibility */
@@ -1633,7 +2000,7 @@ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw,
}
/* find ICE segment in given package */
- *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE,
+ *seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, hw->pkg_seg_id,
ospkg);
if (!*seg) {
ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
@@ -1641,15 +2008,8 @@ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw,
}
/* Check if FW is compatible with the OS package */
- size = struct_size(pkg, pkg_info, ICE_PKG_CNT);
- pkg = kzalloc(size, GFP_KERNEL);
- if (!pkg)
- return ICE_DDP_PKG_ERR;
-
- if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL)) {
- state = ICE_DDP_PKG_LOAD_ERROR;
- goto fw_ddp_compat_free_alloc;
- }
+ if (ice_aq_get_pkg_info_list(hw, pkg, size, NULL))
+ return ICE_DDP_PKG_LOAD_ERROR;
for (i = 0; i < le32_to_cpu(pkg->count); i++) {
/* loop till we find the NVM package */
@@ -1666,8 +2026,7 @@ static enum ice_ddp_state ice_chk_pkg_compat(struct ice_hw *hw,
/* done processing NVM package so break */
break;
}
-fw_ddp_compat_free_alloc:
- kfree(pkg);
+
return state;
}
@@ -1807,6 +2166,11 @@ enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
if (state)
return state;
+ /* must be a matching segment */
+ if (hw->pkg_has_signing_seg &&
+ !ice_match_signing_seg(pkg, hw->pkg_seg_id, hw->pkg_sign_type))
+ return ICE_DDP_PKG_ERR;
+
/* before downloading the package, check package version for
* compatibility with driver
*/
@@ -1816,7 +2180,7 @@ enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
/* initialize package hints and then download package */
ice_init_pkg_hints(hw, seg);
- state = ice_download_pkg(hw, seg);
+ state = ice_download_pkg(hw, pkg, seg);
if (state == ICE_DDP_PKG_ALREADY_LOADED) {
ice_debug(hw, ICE_DBG_INIT,
"package previously loaded - no work.\n");
diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.h b/drivers/net/ethernet/intel/ice/ice_ddp.h
index 41acfe26df1c..ff66c2ffb1a2 100644
--- a/drivers/net/ethernet/intel/ice/ice_ddp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ddp.h
@@ -98,10 +98,21 @@ struct ice_pkg_hdr {
__le32 seg_offset[];
};
+/* Package signing algorithm types */
+#define SEGMENT_SIGN_TYPE_INVALID 0x00000000
+#define SEGMENT_SIGN_TYPE_RSA2K 0x00000001
+#define SEGMENT_SIGN_TYPE_RSA3K 0x00000002
+#define SEGMENT_SIGN_TYPE_RSA3K_SBB 0x00000003 /* Secure Boot Block */
+#define SEGMENT_SIGN_TYPE_RSA3K_E825 0x00000005
+
/* generic segment */
struct ice_generic_seg_hdr {
-#define SEGMENT_TYPE_METADATA 0x00000001
-#define SEGMENT_TYPE_ICE 0x00000010
+#define SEGMENT_TYPE_INVALID 0x00000000
+#define SEGMENT_TYPE_METADATA 0x00000001
+#define SEGMENT_TYPE_ICE_E810 0x00000010
+#define SEGMENT_TYPE_SIGNING 0x00001001
+#define SEGMENT_TYPE_ICE_RUN_TIME_CFG 0x00000020
+#define SEGMENT_TYPE_ICE_E830 0x00000017
__le32 seg_type;
struct ice_pkg_ver seg_format_ver;
__le32 seg_size;
@@ -163,6 +174,18 @@ struct ice_global_metadata_seg {
#define ICE_MIN_S_SZ 1
#define ICE_MAX_S_SZ 4084
+struct ice_sign_seg {
+ struct ice_generic_seg_hdr hdr;
+ __le32 seg_id;
+ __le32 sign_type;
+ __le32 signed_seg_idx;
+ __le32 signed_buf_start;
+ __le32 signed_buf_count;
+#define ICE_SIGN_SEG_RESERVED_COUNT 44
+ u8 reserved[ICE_SIGN_SEG_RESERVED_COUNT];
+ struct ice_buf_table buf_tbl;
+};
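A signing segment is self-describing in two directions: buf_tbl holds the signature buffers to stream first, while the three signed_* fields name, by segment index and buffer range, the slice of the config segment that the signature covers (this is what ice_dwnld_sign_and_cfg_segs() in ice_ddp.c consumes). Roughly:

	/* sign seg                          config seg [signed_seg_idx]
	 * +----------------------+          +--------------------------+
	 * | seg_id, sign_type    |          | buf_array[...]           |
	 * | signed_seg_idx   ----+--------->| buf_array[start ..       |
	 * | signed_buf_start     |          |   start + count - 1]     |
	 * | signed_buf_count     |          +--------------------------+
	 * | buf_tbl (signatures) |
	 * +----------------------+
	 */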
+
/* section information */
struct ice_section_entry {
__le32 type;
@@ -416,21 +439,13 @@ struct ice_pkg_enum {
void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
};
-int ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
- u16 buf_size, bool last_buf, u32 *error_offset,
- u32 *error_info, struct ice_sq_cd *cd);
int ice_aq_upload_section(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
u16 buf_size, struct ice_sq_cd *cd);
void *ice_pkg_buf_alloc_section(struct ice_buf_build *bld, u32 type, u16 size);
-enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len);
-
struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw);
-struct ice_generic_seg_hdr *ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
- struct ice_pkg_hdr *pkg_hdr);
-
int ice_update_pkg_no_lock(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count);
@@ -439,6 +454,4 @@ u16 ice_pkg_buf_get_active_sections(struct ice_buf_build *bld);
void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
u32 sect_type);
-struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf);
-
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index 6d560d1c74a4..a2d384dbfc76 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018, Intel Corporation. */
+/* Copyright (c) 2018-2023, Intel Corporation. */
#ifndef _ICE_DEVIDS_H_
#define _ICE_DEVIDS_H_
@@ -16,6 +16,14 @@
#define ICE_DEV_ID_E823L_1GBE 0x124F
/* Intel(R) Ethernet Connection E823-L for QSFP */
#define ICE_DEV_ID_E823L_QSFP 0x151D
+/* Intel(R) Ethernet Controller E830-C for backplane */
+#define ICE_DEV_ID_E830_BACKPLANE 0x12D1
+/* Intel(R) Ethernet Controller E830-C for QSFP */
+#define ICE_DEV_ID_E830_QSFP56 0x12D2
+/* Intel(R) Ethernet Controller E830-C for SFP */
+#define ICE_DEV_ID_E830_SFP 0x12D3
+/* Intel(R) Ethernet Controller E830-C for SFP-DD */
+#define ICE_DEV_ID_E830_SFP_DD 0x12D4
/* Intel(R) Ethernet Controller E810-C for backplane */
#define ICE_DEV_ID_E810C_BACKPLANE 0x1591
/* Intel(R) Ethernet Controller E810-C for QSFP */
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.c b/drivers/net/ethernet/intel/ice/ice_dpll.c
new file mode 100644
index 000000000000..86b180cb32a0
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -0,0 +1,2117 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_trace.h"
+#include <linux/dpll.h>
+
+#define ICE_CGU_STATE_ACQ_ERR_THRESHOLD 50
+#define ICE_DPLL_PIN_IDX_INVALID 0xff
+#define ICE_DPLL_RCLK_NUM_PER_PF 1
+
+/**
+ * enum ice_dpll_pin_type - enumerate ice pin types:
+ * @ICE_DPLL_PIN_INVALID: invalid pin type
+ * @ICE_DPLL_PIN_TYPE_INPUT: input pin
+ * @ICE_DPLL_PIN_TYPE_OUTPUT: output pin
+ * @ICE_DPLL_PIN_TYPE_RCLK_INPUT: recovery clock input pin
+ */
+enum ice_dpll_pin_type {
+ ICE_DPLL_PIN_INVALID,
+ ICE_DPLL_PIN_TYPE_INPUT,
+ ICE_DPLL_PIN_TYPE_OUTPUT,
+ ICE_DPLL_PIN_TYPE_RCLK_INPUT,
+};
+
+static const char * const pin_type_name[] = {
+ [ICE_DPLL_PIN_TYPE_INPUT] = "input",
+ [ICE_DPLL_PIN_TYPE_OUTPUT] = "output",
+ [ICE_DPLL_PIN_TYPE_RCLK_INPUT] = "rclk-input",
+};
+
+/**
+ * ice_dpll_pin_freq_set - set pin's frequency
+ * @pf: private board structure
+ * @pin: pointer to a pin
+ * @pin_type: type of pin being configured
+ * @freq: frequency to be set
+ * @extack: error reporting
+ *
+ * Set requested frequency on a pin.
+ *
+ * Context: Called under pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error on AQ or wrong pin type given
+ */
+static int
+ice_dpll_pin_freq_set(struct ice_pf *pf, struct ice_dpll_pin *pin,
+ enum ice_dpll_pin_type pin_type, const u32 freq,
+ struct netlink_ext_ack *extack)
+{
+ u8 flags;
+ int ret;
+
+ switch (pin_type) {
+ case ICE_DPLL_PIN_TYPE_INPUT:
+ flags = ICE_AQC_SET_CGU_IN_CFG_FLG1_UPDATE_FREQ;
+ ret = ice_aq_set_input_pin_cfg(&pf->hw, pin->idx, flags,
+ pin->flags[0], freq, 0);
+ break;
+ case ICE_DPLL_PIN_TYPE_OUTPUT:
+ flags = ICE_AQC_SET_CGU_OUT_CFG_UPDATE_FREQ;
+ ret = ice_aq_set_output_pin_cfg(&pf->hw, pin->idx, flags,
+ 0, freq, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (ret) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "err:%d %s failed to set pin freq:%u on pin:%u\n",
+ ret,
+ ice_aq_str(pf->hw.adminq.sq_last_status),
+ freq, pin->idx);
+ return ret;
+ }
+ pin->freq = freq;
+
+ return 0;
+}
+
+/**
+ * ice_dpll_frequency_set - wrapper for pin callback for set frequency
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: pointer to dpll
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @frequency: frequency to be set
+ * @extack: error reporting
+ * @pin_type: type of pin being configured
+ *
+ * Wraps internal set frequency command on a pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error pin not found or couldn't set in hw
+ */
+static int
+ice_dpll_frequency_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ const u32 frequency,
+ struct netlink_ext_ack *extack,
+ enum ice_dpll_pin_type pin_type)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+ int ret;
+
+ mutex_lock(&pf->dplls.lock);
+ ret = ice_dpll_pin_freq_set(pf, p, pin_type, frequency, extack);
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_input_frequency_set - input pin callback for set frequency
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: pointer to dpll
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @frequency: frequency to be set
+ * @extack: error reporting
+ *
+ * Wraps internal set frequency command on a pin.
+ *
+ * Context: Calls a function which acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error pin not found or couldn't set in hw
+ */
+static int
+ice_dpll_input_frequency_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 frequency, struct netlink_ext_ack *extack)
+{
+ return ice_dpll_frequency_set(pin, pin_priv, dpll, dpll_priv, frequency,
+ extack, ICE_DPLL_PIN_TYPE_INPUT);
+}
+
+/**
+ * ice_dpll_output_frequency_set - output pin callback for set frequency
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: pointer to dpll
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @frequency: frequency to be set
+ * @extack: error reporting
+ *
+ * Wraps internal set frequency command on a pin.
+ *
+ * Context: Calls a function which acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error pin not found or couldn't set in hw
+ */
+static int
+ice_dpll_output_frequency_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 frequency, struct netlink_ext_ack *extack)
+{
+ return ice_dpll_frequency_set(pin, pin_priv, dpll, dpll_priv, frequency,
+ extack, ICE_DPLL_PIN_TYPE_OUTPUT);
+}
+
+/**
+ * ice_dpll_frequency_get - wrapper for pin callback for get frequency
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: pointer to dpll
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @frequency: on success holds pin's frequency
+ * @extack: error reporting
+ * @pin_type: type of pin being configured
+ *
+ * Wraps internal get frequency command of a pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error pin not found or couldn't get from hw
+ */
+static int
+ice_dpll_frequency_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 *frequency, struct netlink_ext_ack *extack,
+ enum ice_dpll_pin_type pin_type)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ mutex_lock(&pf->dplls.lock);
+ *frequency = p->freq;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_input_frequency_get - input pin callback for get frequency
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: pointer to dpll
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @frequency: on success holds pin's frequency
+ * @extack: error reporting
+ *
+ * Wraps internal get frequency command of an input pin.
+ *
+ * Context: Calls a function which acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error pin not found or couldn't get from hw
+ */
+static int
+ice_dpll_input_frequency_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 *frequency, struct netlink_ext_ack *extack)
+{
+ return ice_dpll_frequency_get(pin, pin_priv, dpll, dpll_priv, frequency,
+ extack, ICE_DPLL_PIN_TYPE_INPUT);
+}
+
+/**
+ * ice_dpll_output_frequency_get - output pin callback for get frequency
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: pointer to dpll
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @frequency: on success holds pin's frequency
+ * @extack: error reporting
+ *
+ * Wraps internal get frequency command of a pin.
+ *
+ * Context: Calls a function which acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error pin not found or couldn't get from hw
+ */
+static int
+ice_dpll_output_frequency_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 *frequency, struct netlink_ext_ack *extack)
+{
+ return ice_dpll_frequency_get(pin, pin_priv, dpll, dpll_priv, frequency,
+ extack, ICE_DPLL_PIN_TYPE_OUTPUT);
+}
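These per-direction wrappers exist because the dpll subsystem's struct dpll_pin_ops carries one callback per operation with no pin-type argument, so each pin type registers its own ops table over the shared workers. A hedged sketch of that wiring (the driver's real ops tables appear further down in this file; field names per <linux/dpll.h>):

	/* Illustrative only: input-pin callbacks gathered for registration
	 * with the dpll subsystem.
	 */
	static const struct dpll_pin_ops example_input_ops = {
		.frequency_get = ice_dpll_input_frequency_get,
		.frequency_set = ice_dpll_input_frequency_set,
	};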
+
+/**
+ * ice_dpll_pin_enable - enable a pin on dplls
+ * @hw: board private hw structure
+ * @pin: pointer to a pin
+ * @pin_type: type of pin being enabled
+ * @extack: error reporting
+ *
+ * Enable a pin on both dplls. Store current state in pin->flags.
+ *
+ * Context: Called under pf->dplls.lock
+ * Return:
+ * * 0 - OK
+ * * negative - error
+ */
+static int
+ice_dpll_pin_enable(struct ice_hw *hw, struct ice_dpll_pin *pin,
+ enum ice_dpll_pin_type pin_type,
+ struct netlink_ext_ack *extack)
+{
+ u8 flags = 0;
+ int ret;
+
+ switch (pin_type) {
+ case ICE_DPLL_PIN_TYPE_INPUT:
+ if (pin->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN)
+ flags |= ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN;
+ flags |= ICE_AQC_SET_CGU_IN_CFG_FLG2_INPUT_EN;
+ ret = ice_aq_set_input_pin_cfg(hw, pin->idx, 0, flags, 0, 0);
+ break;
+ case ICE_DPLL_PIN_TYPE_OUTPUT:
+ if (pin->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN)
+ flags |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN;
+ flags |= ICE_AQC_SET_CGU_OUT_CFG_OUT_EN;
+ ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, 0, 0, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (ret)
+ NL_SET_ERR_MSG_FMT(extack,
+ "err:%d %s failed to enable %s pin:%u\n",
+ ret, ice_aq_str(hw->adminq.sq_last_status),
+ pin_type_name[pin_type], pin->idx);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_pin_disable - disable a pin on dplls
+ * @hw: board private hw structure
+ * @pin: pointer to a pin
+ * @pin_type: type of pin being disabled
+ * @extack: error reporting
+ *
+ * Disable a pin on both dplls. Store current state in pin->flags.
+ *
+ * Context: Called under pf->dplls.lock
+ * Return:
+ * * 0 - OK
+ * * negative - error
+ */
+static int
+ice_dpll_pin_disable(struct ice_hw *hw, struct ice_dpll_pin *pin,
+ enum ice_dpll_pin_type pin_type,
+ struct netlink_ext_ack *extack)
+{
+ u8 flags = 0;
+ int ret;
+
+ switch (pin_type) {
+ case ICE_DPLL_PIN_TYPE_INPUT:
+ if (pin->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN)
+ flags |= ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN;
+ ret = ice_aq_set_input_pin_cfg(hw, pin->idx, 0, flags, 0, 0);
+ break;
+ case ICE_DPLL_PIN_TYPE_OUTPUT:
+ if (pin->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN)
+ flags |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN;
+ ret = ice_aq_set_output_pin_cfg(hw, pin->idx, flags, 0, 0, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (ret)
+ NL_SET_ERR_MSG_FMT(extack,
+ "err:%d %s failed to disable %s pin:%u\n",
+ ret, ice_aq_str(hw->adminq.sq_last_status),
+ pin_type_name[pin_type], pin->idx);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_pin_state_update - update pin's state
+ * @pf: private board struct
+ * @pin: structure with pin attributes to be updated
+ * @pin_type: type of pin being updated
+ * @extack: error reporting
+ *
+ * Determine the pin's current state and frequency, then update the struct
+ * holding the pin info. For input pins the state is tracked separately for
+ * each dpll; for rclk pins it is tracked separately for each parent.
+ *
+ * Context: Called under pf->dplls.lock
+ * Return:
+ * * 0 - OK
+ * * negative - error
+ */
+static int
+ice_dpll_pin_state_update(struct ice_pf *pf, struct ice_dpll_pin *pin,
+ enum ice_dpll_pin_type pin_type,
+ struct netlink_ext_ack *extack)
+{
+ u8 parent, port_num = ICE_AQC_SET_PHY_REC_CLK_OUT_CURR_PORT;
+ int ret;
+
+ switch (pin_type) {
+ case ICE_DPLL_PIN_TYPE_INPUT:
+ ret = ice_aq_get_input_pin_cfg(&pf->hw, pin->idx, NULL, NULL,
+ NULL, &pin->flags[0],
+ &pin->freq, NULL);
+ if (ret)
+ goto err;
+ if (ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN & pin->flags[0]) {
+ if (pin->pin) {
+ pin->state[pf->dplls.eec.dpll_idx] =
+ pin->pin == pf->dplls.eec.active_input ?
+ DPLL_PIN_STATE_CONNECTED :
+ DPLL_PIN_STATE_SELECTABLE;
+ pin->state[pf->dplls.pps.dpll_idx] =
+ pin->pin == pf->dplls.pps.active_input ?
+ DPLL_PIN_STATE_CONNECTED :
+ DPLL_PIN_STATE_SELECTABLE;
+ } else {
+ pin->state[pf->dplls.eec.dpll_idx] =
+ DPLL_PIN_STATE_SELECTABLE;
+ pin->state[pf->dplls.pps.dpll_idx] =
+ DPLL_PIN_STATE_SELECTABLE;
+ }
+ } else {
+ pin->state[pf->dplls.eec.dpll_idx] =
+ DPLL_PIN_STATE_DISCONNECTED;
+ pin->state[pf->dplls.pps.dpll_idx] =
+ DPLL_PIN_STATE_DISCONNECTED;
+ }
+ break;
+ case ICE_DPLL_PIN_TYPE_OUTPUT:
+ ret = ice_aq_get_output_pin_cfg(&pf->hw, pin->idx,
+ &pin->flags[0], NULL,
+ &pin->freq, NULL);
+ if (ret)
+ goto err;
+ if (ICE_AQC_SET_CGU_OUT_CFG_OUT_EN & pin->flags[0])
+ pin->state[0] = DPLL_PIN_STATE_CONNECTED;
+ else
+ pin->state[0] = DPLL_PIN_STATE_DISCONNECTED;
+ break;
+ case ICE_DPLL_PIN_TYPE_RCLK_INPUT:
+ for (parent = 0; parent < pf->dplls.rclk.num_parents;
+ parent++) {
+ u8 p = parent;
+
+ ret = ice_aq_get_phy_rec_clk_out(&pf->hw, &p,
+ &port_num,
+ &pin->flags[parent],
+ NULL);
+ if (ret)
+ goto err;
+ if (ICE_AQC_GET_PHY_REC_CLK_OUT_OUT_EN &
+ pin->flags[parent])
+ pin->state[parent] = DPLL_PIN_STATE_CONNECTED;
+ else
+ pin->state[parent] =
+ DPLL_PIN_STATE_DISCONNECTED;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+err:
+ if (extack)
+ NL_SET_ERR_MSG_FMT(extack,
+ "err:%d %s failed to update %s pin:%u\n",
+ ret,
+ ice_aq_str(pf->hw.adminq.sq_last_status),
+ pin_type_name[pin_type], pin->idx);
+ else
+ dev_err_ratelimited(ice_pf_to_dev(pf),
+ "err:%d %s failed to update %s pin:%u\n",
+ ret,
+ ice_aq_str(pf->hw.adminq.sq_last_status),
+ pin_type_name[pin_type], pin->idx);
+ return ret;
+}
+
+/**
+ * ice_dpll_hw_input_prio_set - set input priority value in hardware
+ * @pf: board private structure
+ * @dpll: ice dpll pointer
+ * @pin: ice pin pointer
+ * @prio: priority value being set on a dpll
+ * @extack: error reporting
+ *
+ * Internal wrapper for setting the priority in the hardware.
+ *
+ * Context: Called under pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failure
+ */
+static int
+ice_dpll_hw_input_prio_set(struct ice_pf *pf, struct ice_dpll *dpll,
+ struct ice_dpll_pin *pin, const u32 prio,
+ struct netlink_ext_ack *extack)
+{
+ int ret;
+
+ ret = ice_aq_set_cgu_ref_prio(&pf->hw, dpll->dpll_idx, pin->idx,
+ (u8)prio);
+ if (ret)
+ NL_SET_ERR_MSG_FMT(extack,
+ "err:%d %s failed to set pin prio:%u on pin:%u\n",
+ ret,
+ ice_aq_str(pf->hw.adminq.sq_last_status),
+ prio, pin->idx);
+ else
+ dpll->input_prio[pin->idx] = prio;
+
+ return ret;
+}
+
+/**
+ * ice_dpll_lock_status_get - get dpll lock status callback
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @status: on success holds dpll's lock status
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback, provides dpll's lock status.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failure
+ */
+static int
+ice_dpll_lock_status_get(const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_lock_status *status,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ mutex_lock(&pf->dplls.lock);
+ *status = d->dpll_state;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_mode_supported - check if dpll's working mode is supported
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @mode: mode to be checked for support
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Provides information on whether a working mode is
+ * supported by the dpll.
+ *
+ * Return:
+ * * true - mode is supported
+ * * false - mode is not supported
+ */
+static bool ice_dpll_mode_supported(const struct dpll_device *dpll,
+ void *dpll_priv,
+ enum dpll_mode mode,
+ struct netlink_ext_ack *extack)
+{
+ if (mode == DPLL_MODE_AUTOMATIC)
+ return true;
+
+ return false;
+}
+
+/**
+ * ice_dpll_mode_get - get dpll's working mode
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @mode: on success holds current working mode of dpll
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Provides working mode of dpll.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failure
+ */
+static int ice_dpll_mode_get(const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_mode *mode,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ mutex_lock(&pf->dplls.lock);
+ *mode = d->mode;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_pin_state_set - set pin's state on dpll
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @enable: if pin shall be enabled
+ * @extack: error reporting
+ * @pin_type: type of a pin
+ *
+ * Set a pin's state on a dpll.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - OK or no change required
+ * * negative - error
+ */
+static int
+ice_dpll_pin_state_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ bool enable, struct netlink_ext_ack *extack,
+ enum ice_dpll_pin_type pin_type)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+ int ret;
+
+ mutex_lock(&pf->dplls.lock);
+ if (enable)
+ ret = ice_dpll_pin_enable(&pf->hw, p, pin_type, extack);
+ else
+ ret = ice_dpll_pin_disable(&pf->hw, p, pin_type, extack);
+ if (!ret)
+ ret = ice_dpll_pin_state_update(pf, p, pin_type, extack);
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_output_state_set - enable/disable output pin on dpll device
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: dpll being configured
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: state of pin to be set
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Set given state on output type pin.
+ *
+ * Context: Calls a function which acquires pf->dplls.lock
+ * Return:
+ * * 0 - state set successfully
+ * * negative - failed to set state
+ */
+static int
+ice_dpll_output_state_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ bool enable = state == DPLL_PIN_STATE_CONNECTED;
+
+ return ice_dpll_pin_state_set(pin, pin_priv, dpll, dpll_priv, enable,
+ extack, ICE_DPLL_PIN_TYPE_OUTPUT);
+}
+
+/**
+ * ice_dpll_input_state_set - enable/disable input pin on dpll device
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: dpll being configured
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: state of pin to be set
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Set given state on input type pin.
+ *
+ * Context: Calls a function which acquires pf->dplls.lock
+ * Return:
+ * * 0 - state set successfully
+ * * negative - failed to set state
+ */
+static int
+ice_dpll_input_state_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ bool enable = state == DPLL_PIN_STATE_SELECTABLE;
+
+ return ice_dpll_pin_state_set(pin, pin_priv, dpll, dpll_priv, enable,
+ extack, ICE_DPLL_PIN_TYPE_INPUT);
+}
+
+/**
+ * ice_dpll_pin_state_get - get pin's state on dpll
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: on success holds state of the pin
+ * @extack: error reporting
+ * @pin_type: type of questioned pin
+ *
+ * Determine a pin's state on a dpll and return it via @state.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failed to get state
+ */
+static int
+ice_dpll_pin_state_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack,
+ enum ice_dpll_pin_type pin_type)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+ int ret;
+
+ mutex_lock(&pf->dplls.lock);
+ ret = ice_dpll_pin_state_update(pf, p, pin_type, extack);
+ if (ret)
+ goto unlock;
+ if (pin_type == ICE_DPLL_PIN_TYPE_INPUT)
+ *state = p->state[d->dpll_idx];
+ else if (pin_type == ICE_DPLL_PIN_TYPE_OUTPUT)
+ *state = p->state[0];
+ ret = 0;
+unlock:
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_output_state_get - get output pin state on dpll device
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: on success holds state of the pin
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Check state of a pin.
+ *
+ * Context: Calls a function which acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failed to get state
+ */
+static int
+ice_dpll_output_state_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ return ice_dpll_pin_state_get(pin, pin_priv, dpll, dpll_priv, state,
+ extack, ICE_DPLL_PIN_TYPE_OUTPUT);
+}
+
+/**
+ * ice_dpll_input_state_get - get input pin state on dpll device
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @state: on success holds state of the pin
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Check state of an input pin.
+ *
+ * Context: Calls a function which acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failed to get state
+ */
+static int
+ice_dpll_input_state_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ return ice_dpll_pin_state_get(pin, pin_priv, dpll, dpll_priv, state,
+ extack, ICE_DPLL_PIN_TYPE_INPUT);
+}
+
+/**
+ * ice_dpll_input_prio_get - get dpll's input prio
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @prio: on success - returns input priority on dpll
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting priority of an input pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failure
+ */
+static int
+ice_dpll_input_prio_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u32 *prio, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ mutex_lock(&pf->dplls.lock);
+ *prio = d->input_prio[p->idx];
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_input_prio_set - set dpll input prio
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @prio: input priority to be set on dpll
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for setting priority of an input pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failure
+ */
+static int
+ice_dpll_input_prio_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u32 prio, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+ int ret;
+
+ mutex_lock(&pf->dplls.lock);
+ ret = ice_dpll_hw_input_prio_set(pf, d, p, prio, extack);
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_input_direction - callback for get input pin direction
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @direction: holds input pin direction
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting direction of an input pin.
+ *
+ * Return:
+ * * 0 - success
+ */
+static int
+ice_dpll_input_direction(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_direction *direction,
+ struct netlink_ext_ack *extack)
+{
+ *direction = DPLL_PIN_DIRECTION_INPUT;
+
+ return 0;
+}
+
+/**
+ * ice_dpll_output_direction - callback for get output pin direction
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @direction: holds output pin direction
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting direction of an output pin.
+ *
+ * Return:
+ * * 0 - success
+ */
+static int
+ice_dpll_output_direction(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_direction *direction,
+ struct netlink_ext_ack *extack)
+{
+ *direction = DPLL_PIN_DIRECTION_OUTPUT;
+
+ return 0;
+}
+
+/**
+ * ice_dpll_pin_phase_adjust_get - callback for get pin phase adjust value
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @phase_adjust: on success holds pin phase_adjust value
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting phase adjust value of a pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_pin_phase_adjust_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s32 *phase_adjust,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_pf *pf = p->pf;
+
+ mutex_lock(&pf->dplls.lock);
+ *phase_adjust = p->phase_adjust;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_pin_phase_adjust_set - helper for setting a pin phase adjust value
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @phase_adjust: phase_adjust to be set
+ * @extack: error reporting
+ * @type: type of a pin
+ *
+ * Helper for dpll subsystem callback. Handler for setting phase adjust value
+ * of a pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_pin_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s32 phase_adjust,
+ struct netlink_ext_ack *extack,
+ enum ice_dpll_pin_type type)
+{
+ struct ice_dpll_pin *p = pin_priv;
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+ u8 flag, flags_en = 0;
+ int ret;
+
+ mutex_lock(&pf->dplls.lock);
+ switch (type) {
+ case ICE_DPLL_PIN_TYPE_INPUT:
+ flag = ICE_AQC_SET_CGU_IN_CFG_FLG1_UPDATE_DELAY;
+ if (p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_ESYNC_EN)
+ flags_en |= ICE_AQC_SET_CGU_IN_CFG_FLG2_ESYNC_EN;
+ if (p->flags[0] & ICE_AQC_GET_CGU_IN_CFG_FLG2_INPUT_EN)
+ flags_en |= ICE_AQC_SET_CGU_IN_CFG_FLG2_INPUT_EN;
+ ret = ice_aq_set_input_pin_cfg(&pf->hw, p->idx, flag, flags_en,
+ 0, phase_adjust);
+ break;
+ case ICE_DPLL_PIN_TYPE_OUTPUT:
+ flag = ICE_AQC_SET_CGU_OUT_CFG_UPDATE_PHASE;
+ if (p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_OUT_EN)
+ flag |= ICE_AQC_SET_CGU_OUT_CFG_OUT_EN;
+ if (p->flags[0] & ICE_AQC_GET_CGU_OUT_CFG_ESYNC_EN)
+ flag |= ICE_AQC_SET_CGU_OUT_CFG_ESYNC_EN;
+ ret = ice_aq_set_output_pin_cfg(&pf->hw, p->idx, flag, 0, 0,
+ phase_adjust);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ if (!ret)
+ p->phase_adjust = phase_adjust;
+ mutex_unlock(&pf->dplls.lock);
+ if (ret)
+ NL_SET_ERR_MSG_FMT(extack,
+ "err:%d %s failed to set pin phase_adjust:%d for pin:%u on dpll:%u\n",
+ ret,
+ ice_aq_str(pf->hw.adminq.sq_last_status),
+ phase_adjust, p->idx, d->dpll_idx);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_input_phase_adjust_set - callback for set input pin phase adjust
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @phase_adjust: phase_adjust to be set
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Wraps a handler for setting phase adjust on input
+ * pin.
+ *
+ * Context: Calls a function which acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_input_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s32 phase_adjust,
+ struct netlink_ext_ack *extack)
+{
+ return ice_dpll_pin_phase_adjust_set(pin, pin_priv, dpll, dpll_priv,
+ phase_adjust, extack,
+ ICE_DPLL_PIN_TYPE_INPUT);
+}
+
+/**
+ * ice_dpll_output_phase_adjust_set - callback for set output pin phase adjust
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @phase_adjust: phase_adjust to be set
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Wraps a handler for setting phase adjust on output
+ * pin.
+ *
+ * Context: Calls a function which acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_output_phase_adjust_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s32 phase_adjust,
+ struct netlink_ext_ack *extack)
+{
+ return ice_dpll_pin_phase_adjust_set(pin, pin_priv, dpll, dpll_priv,
+ phase_adjust, extack,
+ ICE_DPLL_PIN_TYPE_OUTPUT);
+}
+
+#define ICE_DPLL_PHASE_OFFSET_DIVIDER 100
+#define ICE_DPLL_PHASE_OFFSET_FACTOR \
+ (DPLL_PHASE_OFFSET_DIVIDER / ICE_DPLL_PHASE_OFFSET_DIVIDER)
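+/* Editorial note on the scaling above (DPLL_PHASE_OFFSET_DIVIDER value
+ * assumed from the dpll netlink uapi): with DPLL_PHASE_OFFSET_DIVIDER
+ * == 1000 the factor evaluates to 1000 / 100 = 10, so a raw phase
+ * offset of 25 read from firmware is reported to the subsystem as 250.
+ */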
+/**
+ * ice_dpll_phase_offset_get - callback for get dpll phase shift value
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @dpll: registered dpll pointer
+ * @dpll_priv: private data pointer passed on dpll registration
+ * @phase_offset: on success holds pin phase_offset value
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback. Handler for getting phase shift value between
+ * dpll's input and output.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - error
+ */
+static int
+ice_dpll_phase_offset_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s64 *phase_offset, struct netlink_ext_ack *extack)
+{
+ struct ice_dpll *d = dpll_priv;
+ struct ice_pf *pf = d->pf;
+
+ mutex_lock(&pf->dplls.lock);
+ if (d->active_input == pin)
+ *phase_offset = d->phase_offset * ICE_DPLL_PHASE_OFFSET_FACTOR;
+ else
+ *phase_offset = 0;
+ mutex_unlock(&pf->dplls.lock);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_rclk_state_on_pin_set - set a state on rclk pin
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @parent_pin: pin parent pointer
+ * @parent_pin_priv: parent private data pointer passed on pin registration
+ * @state: state to be set on pin
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback, set a state of an rclk pin on a parent pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failure
+ */
+static int
+ice_dpll_rclk_state_on_pin_set(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *parent_pin,
+ void *parent_pin_priv,
+ enum dpll_pin_state state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv, *parent = parent_pin_priv;
+ bool enable = state == DPLL_PIN_STATE_CONNECTED;
+ struct ice_pf *pf = p->pf;
+ int ret = -EINVAL;
+ u32 hw_idx;
+
+ mutex_lock(&pf->dplls.lock);
+ hw_idx = parent->idx - pf->dplls.base_rclk_idx;
+ if (hw_idx >= pf->dplls.num_inputs)
+ goto unlock;
+
+ if ((enable && p->state[hw_idx] == DPLL_PIN_STATE_CONNECTED) ||
+ (!enable && p->state[hw_idx] == DPLL_PIN_STATE_DISCONNECTED)) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "pin:%u state:%u on parent:%u already set",
+ p->idx, state, parent->idx);
+ goto unlock;
+ }
+ ret = ice_aq_set_phy_rec_clk_out(&pf->hw, hw_idx, enable,
+ &p->freq);
+ if (ret)
+ NL_SET_ERR_MSG_FMT(extack,
+ "err:%d %s failed to set pin state:%u for pin:%u on parent:%u\n",
+ ret,
+ ice_aq_str(pf->hw.adminq.sq_last_status),
+ state, p->idx, parent->idx);
+unlock:
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+/**
+ * ice_dpll_rclk_state_on_pin_get - get a state of rclk pin
+ * @pin: pointer to a pin
+ * @pin_priv: private data pointer passed on pin registration
+ * @parent_pin: pin parent pointer
+ * @parent_pin_priv: pin parent priv data pointer passed on pin registration
+ * @state: on success holds pin state on parent pin
+ * @extack: error reporting
+ *
+ * Dpll subsystem callback, get a state of a recovered clock pin.
+ *
+ * Context: Acquires pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - failure
+ */
+static int
+ice_dpll_rclk_state_on_pin_get(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *parent_pin,
+ void *parent_pin_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_dpll_pin *p = pin_priv, *parent = parent_pin_priv;
+ struct ice_pf *pf = p->pf;
+ int ret = -EINVAL;
+ u32 hw_idx;
+
+ mutex_lock(&pf->dplls.lock);
+ hw_idx = parent->idx - pf->dplls.base_rclk_idx;
+ if (hw_idx >= pf->dplls.num_inputs)
+ goto unlock;
+
+ ret = ice_dpll_pin_state_update(pf, p, ICE_DPLL_PIN_TYPE_RCLK_INPUT,
+ extack);
+ if (ret)
+ goto unlock;
+
+ *state = p->state[hw_idx];
+ ret = 0;
+unlock:
+ mutex_unlock(&pf->dplls.lock);
+
+ return ret;
+}
+
+static const struct dpll_pin_ops ice_dpll_rclk_ops = {
+ .state_on_pin_set = ice_dpll_rclk_state_on_pin_set,
+ .state_on_pin_get = ice_dpll_rclk_state_on_pin_get,
+ .direction_get = ice_dpll_input_direction,
+};
+
+static const struct dpll_pin_ops ice_dpll_input_ops = {
+ .frequency_get = ice_dpll_input_frequency_get,
+ .frequency_set = ice_dpll_input_frequency_set,
+ .state_on_dpll_get = ice_dpll_input_state_get,
+ .state_on_dpll_set = ice_dpll_input_state_set,
+ .prio_get = ice_dpll_input_prio_get,
+ .prio_set = ice_dpll_input_prio_set,
+ .direction_get = ice_dpll_input_direction,
+ .phase_adjust_get = ice_dpll_pin_phase_adjust_get,
+ .phase_adjust_set = ice_dpll_input_phase_adjust_set,
+ .phase_offset_get = ice_dpll_phase_offset_get,
+};
+
+static const struct dpll_pin_ops ice_dpll_output_ops = {
+ .frequency_get = ice_dpll_output_frequency_get,
+ .frequency_set = ice_dpll_output_frequency_set,
+ .state_on_dpll_get = ice_dpll_output_state_get,
+ .state_on_dpll_set = ice_dpll_output_state_set,
+ .direction_get = ice_dpll_output_direction,
+ .phase_adjust_get = ice_dpll_pin_phase_adjust_get,
+ .phase_adjust_set = ice_dpll_output_phase_adjust_set,
+};
+
+static const struct dpll_device_ops ice_dpll_ops = {
+ .lock_status_get = ice_dpll_lock_status_get,
+ .mode_supported = ice_dpll_mode_supported,
+ .mode_get = ice_dpll_mode_get,
+};
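+/* Illustration only, not part of this patch: once registered, the ops
+ * tables above are exercised from userspace through the dpll generic
+ * netlink family, e.g. with a (hypothetical) ynl CLI invocation:
+ *
+ *   ./tools/net/ynl/cli.py --spec Documentation/netlink/specs/dpll.yaml \
+ *           --dump device-get
+ *
+ * where each reported device's lock status and mode come from
+ * ice_dpll_lock_status_get() and ice_dpll_mode_get() respectively.
+ */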
+
+/**
+ * ice_generate_clock_id - generates unique clock_id for registering dpll.
+ * @pf: board private structure
+ *
+ * Generates unique (per board) clock_id for allocation and search of dpll
+ * devices in Linux dpll subsystem.
+ *
+ * Return: generated clock id for the board
+ */
+static u64 ice_generate_clock_id(struct ice_pf *pf)
+{
+ return pci_get_dsn(pf->pdev);
+}
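+/* Editorial note: pci_get_dsn() returns the 64-bit PCIe Device Serial
+ * Number capability value (0 when the capability is absent), so all PFs
+ * of one board derive the same clock_id. For illustration, the same
+ * value is typically visible as the "Device Serial Number" capability
+ * in lspci -vv output.
+ */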
+
+/**
+ * ice_dpll_notify_changes - notify dpll subsystem about changes
+ * @d: pointer to dpll
+ *
+ * Once a change is detected, an appropriate event is submitted to the dpll
+ * subsystem.
+ */
+static void ice_dpll_notify_changes(struct ice_dpll *d)
+{
+ bool pin_notified = false;
+
+ if (d->prev_dpll_state != d->dpll_state) {
+ d->prev_dpll_state = d->dpll_state;
+ dpll_device_change_ntf(d->dpll);
+ }
+ if (d->prev_input != d->active_input) {
+ if (d->prev_input)
+ dpll_pin_change_ntf(d->prev_input);
+ d->prev_input = d->active_input;
+ if (d->active_input) {
+ dpll_pin_change_ntf(d->active_input);
+ pin_notified = true;
+ }
+ }
+ if (d->prev_phase_offset != d->phase_offset) {
+ d->prev_phase_offset = d->phase_offset;
+ if (!pin_notified && d->active_input)
+ dpll_pin_change_ntf(d->active_input);
+ }
+}
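+/* Editorial note: dpll_device_change_ntf() and dpll_pin_change_ntf()
+ * emit DPLL_CMD_DEVICE_CHANGE_NTF / DPLL_CMD_PIN_CHANGE_NTF multicast
+ * messages (names assumed from the dpll netlink uapi), so a userspace
+ * listener on the monitor group observes lock-state, active-input and
+ * phase-offset transitions without polling the driver.
+ */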
+
+/**
+ * ice_dpll_update_state - update dpll state
+ * @pf: pf private structure
+ * @d: pointer to queried dpll device
+ * @init: if the function is called during ice dpll initialization
+ *
+ * Poll current state of dpll from hw and update ice_dpll struct.
+ *
+ * Context: Called by kworker under pf->dplls.lock
+ * Return:
+ * * 0 - success
+ * * negative - AQ failure
+ */
+static int
+ice_dpll_update_state(struct ice_pf *pf, struct ice_dpll *d, bool init)
+{
+ struct ice_dpll_pin *p = NULL;
+ int ret;
+
+ ret = ice_get_cgu_state(&pf->hw, d->dpll_idx, d->prev_dpll_state,
+ &d->input_idx, &d->ref_state, &d->eec_mode,
+ &d->phase_offset, &d->dpll_state);
+
+ dev_dbg(ice_pf_to_dev(pf),
+ "update dpll=%d, prev_src_idx:%u, src_idx:%u, state:%d, prev:%d mode:%d\n",
+ d->dpll_idx, d->prev_input_idx, d->input_idx,
+ d->dpll_state, d->prev_dpll_state, d->mode);
+ if (ret) {
+ dev_err(ice_pf_to_dev(pf),
+ "update dpll=%d state failed, ret=%d %s\n",
+ d->dpll_idx, ret,
+ ice_aq_str(pf->hw.adminq.sq_last_status));
+ return ret;
+ }
+ if (init) {
+ if (d->dpll_state == DPLL_LOCK_STATUS_LOCKED ||
+ d->dpll_state == DPLL_LOCK_STATUS_LOCKED_HO_ACQ)
+ d->active_input = pf->dplls.inputs[d->input_idx].pin;
+ p = &pf->dplls.inputs[d->input_idx];
+ return ice_dpll_pin_state_update(pf, p,
+ ICE_DPLL_PIN_TYPE_INPUT, NULL);
+ }
+ if (d->dpll_state == DPLL_LOCK_STATUS_HOLDOVER ||
+ d->dpll_state == DPLL_LOCK_STATUS_UNLOCKED) {
+ d->active_input = NULL;
+ if (d->input_idx != ICE_DPLL_PIN_IDX_INVALID)
+ p = &pf->dplls.inputs[d->input_idx];
+ d->prev_input_idx = ICE_DPLL_PIN_IDX_INVALID;
+ d->input_idx = ICE_DPLL_PIN_IDX_INVALID;
+ if (!p)
+ return 0;
+ ret = ice_dpll_pin_state_update(pf, p,
+ ICE_DPLL_PIN_TYPE_INPUT, NULL);
+ } else if (d->input_idx != d->prev_input_idx) {
+ if (d->prev_input_idx != ICE_DPLL_PIN_IDX_INVALID) {
+ p = &pf->dplls.inputs[d->prev_input_idx];
+ ice_dpll_pin_state_update(pf, p,
+ ICE_DPLL_PIN_TYPE_INPUT,
+ NULL);
+ }
+ if (d->input_idx != ICE_DPLL_PIN_IDX_INVALID) {
+ p = &pf->dplls.inputs[d->input_idx];
+ d->active_input = p->pin;
+ ice_dpll_pin_state_update(pf, p,
+ ICE_DPLL_PIN_TYPE_INPUT,
+ NULL);
+ }
+ d->prev_input_idx = d->input_idx;
+ }
+
+ return ret;
+}
+
+/**
+ * ice_dpll_periodic_work - DPLLs periodic worker
+ * @work: pointer to kthread_work structure
+ *
+ * The DPLLs' periodic worker is responsible for polling the state of the
+ * dplls.
+ *
+ * Context: Holds pf->dplls.lock
+ */
+static void ice_dpll_periodic_work(struct kthread_work *work)
+{
+ struct ice_dplls *d = container_of(work, struct ice_dplls, work.work);
+ struct ice_pf *pf = container_of(d, struct ice_pf, dplls);
+ struct ice_dpll *de = &pf->dplls.eec;
+ struct ice_dpll *dp = &pf->dplls.pps;
+ int ret;
+
+ mutex_lock(&pf->dplls.lock);
+ ret = ice_dpll_update_state(pf, de, false);
+ if (!ret)
+ ret = ice_dpll_update_state(pf, dp, false);
+ if (ret) {
+ d->cgu_state_acq_err_num++;
+ /* stop rescheduling this worker */
+ if (d->cgu_state_acq_err_num >
+ ICE_CGU_STATE_ACQ_ERR_THRESHOLD) {
+ dev_err(ice_pf_to_dev(pf),
+ "EEC/PPS DPLLs periodic work disabled\n");
+ mutex_unlock(&pf->dplls.lock);
+ return;
+ }
+ }
+ mutex_unlock(&pf->dplls.lock);
+ ice_dpll_notify_changes(de);
+ ice_dpll_notify_changes(dp);
+
+ /* Run twice a second or reschedule if update failed */
+ kthread_queue_delayed_work(d->kworker, &d->work,
+ ret ? msecs_to_jiffies(10) :
+ msecs_to_jiffies(500));
+}
+
+/**
+ * ice_dpll_release_pins - release pins resources from dpll subsystem
+ * @pins: pointer to pins array
+ * @count: number of pins
+ *
+ * Release resources of given pins array in the dpll subsystem.
+ */
+static void ice_dpll_release_pins(struct ice_dpll_pin *pins, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ dpll_pin_put(pins[i].pin);
+}
+
+/**
+ * ice_dpll_get_pins - get pins from dpll subsystem
+ * @pf: board private structure
+ * @pins: pointer to pins array
+ * @start_idx: pin idx value at which the get starts
+ * @count: number of pins
+ * @clock_id: clock_id of dpll device
+ *
+ * Get (allocate) pins in the dpll subsystem and store them in the pin field
+ * of the given pins array.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - allocation failure reason
+ */
+static int
+ice_dpll_get_pins(struct ice_pf *pf, struct ice_dpll_pin *pins,
+ int start_idx, int count, u64 clock_id)
+{
+ int i, ret;
+
+ for (i = 0; i < count; i++) {
+ pins[i].pin = dpll_pin_get(clock_id, i + start_idx, THIS_MODULE,
+ &pins[i].prop);
+ if (IS_ERR(pins[i].pin)) {
+ ret = PTR_ERR(pins[i].pin);
+ goto release_pins;
+ }
+ }
+
+ return 0;
+
+release_pins:
+ while (--i >= 0)
+ dpll_pin_put(pins[i].pin);
+ return ret;
+}
+
+/**
+ * ice_dpll_unregister_pins - unregister pins from a dpll
+ * @dpll: dpll device pointer
+ * @pins: pointer to pins array
+ * @ops: callback ops registered with the pins
+ * @count: number of pins
+ *
+ * Unregister pins of a given array from a given dpll device registered in
+ * the dpll subsystem.
+ */
+static void
+ice_dpll_unregister_pins(struct dpll_device *dpll, struct ice_dpll_pin *pins,
+ const struct dpll_pin_ops *ops, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]);
+}
+
+/**
+ * ice_dpll_register_pins - register pins with a dpll
+ * @dpll: dpll pointer to register pins with
+ * @pins: pointer to pins array
+ * @ops: callback ops registered with the pins
+ * @count: number of pins
+ *
+ * Register pins of a given array with given dpll in dpll subsystem.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - registration failure reason
+ */
+static int
+ice_dpll_register_pins(struct dpll_device *dpll, struct ice_dpll_pin *pins,
+ const struct dpll_pin_ops *ops, int count)
+{
+ int ret, i;
+
+ for (i = 0; i < count; i++) {
+ ret = dpll_pin_register(dpll, pins[i].pin, ops, &pins[i]);
+ if (ret)
+ goto unregister_pins;
+ }
+
+ return 0;
+
+unregister_pins:
+ while (--i >= 0)
+ dpll_pin_unregister(dpll, pins[i].pin, ops, &pins[i]);
+ return ret;
+}
+
+/**
+ * ice_dpll_deinit_direct_pins - deinitialize direct pins
+ * @cgu: if cgu is present and controlled by this NIC
+ * @pins: pointer to pins array
+ * @count: number of pins
+ * @ops: callback ops registered with the pins
+ * @first: dpll device pointer
+ * @second: dpll device pointer
+ *
+ * If the cgu is owned, unregister pins from the given dplls.
+ * Release pin resources back to the dpll subsystem.
+ */
+static void
+ice_dpll_deinit_direct_pins(bool cgu, struct ice_dpll_pin *pins, int count,
+ const struct dpll_pin_ops *ops,
+ struct dpll_device *first,
+ struct dpll_device *second)
+{
+ if (cgu) {
+ ice_dpll_unregister_pins(first, pins, ops, count);
+ ice_dpll_unregister_pins(second, pins, ops, count);
+ }
+ ice_dpll_release_pins(pins, count);
+}
+
+/**
+ * ice_dpll_init_direct_pins - initialize direct pins
+ * @pf: board private structure
+ * @cgu: if cgu is present and controlled by this NIC
+ * @pins: pointer to pins array
+ * @start_idx: index at which the allocation starts in the dpll subsystem
+ * @count: number of pins
+ * @ops: callback ops registered with the pins
+ * @first: dpll device pointer
+ * @second: dpll device pointer
+ *
+ * Allocate directly connected pins of a given array in the dpll subsystem.
+ * If the cgu is owned, register the allocated pins with the given dplls.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - registration failure reason
+ */
+static int
+ice_dpll_init_direct_pins(struct ice_pf *pf, bool cgu,
+ struct ice_dpll_pin *pins, int start_idx, int count,
+ const struct dpll_pin_ops *ops,
+ struct dpll_device *first, struct dpll_device *second)
+{
+ int ret;
+
+ ret = ice_dpll_get_pins(pf, pins, start_idx, count, pf->dplls.clock_id);
+ if (ret)
+ return ret;
+ if (cgu) {
+ ret = ice_dpll_register_pins(first, pins, ops, count);
+ if (ret)
+ goto release_pins;
+ ret = ice_dpll_register_pins(second, pins, ops, count);
+ if (ret)
+ goto unregister_first;
+ }
+
+ return 0;
+
+unregister_first:
+ ice_dpll_unregister_pins(first, pins, ops, count);
+release_pins:
+ ice_dpll_release_pins(pins, count);
+ return ret;
+}
+
+/**
+ * ice_dpll_deinit_rclk_pin - release rclk pin resources
+ * @pf: board private structure
+ *
+ * Deregister rclk pin from parent pins and release resources in dpll subsystem.
+ */
+static void ice_dpll_deinit_rclk_pin(struct ice_pf *pf)
+{
+ struct ice_dpll_pin *rclk = &pf->dplls.rclk;
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ struct dpll_pin *parent;
+ int i;
+
+ for (i = 0; i < rclk->num_parents; i++) {
+ parent = pf->dplls.inputs[rclk->parent_idx[i]].pin;
+ if (!parent)
+ continue;
+ dpll_pin_on_pin_unregister(parent, rclk->pin,
+ &ice_dpll_rclk_ops, rclk);
+ }
+ if (WARN_ON_ONCE(!vsi || !vsi->netdev))
+ return;
+ netdev_dpll_pin_clear(vsi->netdev);
+ dpll_pin_put(rclk->pin);
+}
+
+/**
+ * ice_dpll_init_rclk_pins - initialize recovered clock pin
+ * @pf: board private structure
+ * @pin: pin to register
+ * @start_idx: index at which the allocation starts in the dpll subsystem
+ * @ops: callback ops registered with the pins
+ *
+ * Allocate resources for the recovered clock pin in the dpll subsystem.
+ * Register the pin with each parent listed in its info and with the pf's
+ * main vsi netdev.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - registration failure reason
+ */
+static int
+ice_dpll_init_rclk_pins(struct ice_pf *pf, struct ice_dpll_pin *pin,
+ int start_idx, const struct dpll_pin_ops *ops)
+{
+ struct ice_vsi *vsi = ice_get_main_vsi(pf);
+ struct dpll_pin *parent;
+ int ret, i;
+
+ ret = ice_dpll_get_pins(pf, pin, start_idx, ICE_DPLL_RCLK_NUM_PER_PF,
+ pf->dplls.clock_id);
+ if (ret)
+ return ret;
+ for (i = 0; i < pf->dplls.rclk.num_parents; i++) {
+ parent = pf->dplls.inputs[pf->dplls.rclk.parent_idx[i]].pin;
+ if (!parent) {
+ ret = -ENODEV;
+ goto unregister_pins;
+ }
+ ret = dpll_pin_on_pin_register(parent, pf->dplls.rclk.pin,
+ ops, &pf->dplls.rclk);
+ if (ret)
+ goto unregister_pins;
+ }
+ if (WARN_ON((!vsi || !vsi->netdev)))
+ return -EINVAL;
+ netdev_dpll_pin_set(vsi->netdev, pf->dplls.rclk.pin);
+
+ return 0;
+
+unregister_pins:
+ while (i) {
+ parent = pf->dplls.inputs[pf->dplls.rclk.parent_idx[--i]].pin;
+ dpll_pin_on_pin_unregister(parent, pf->dplls.rclk.pin,
+ &ice_dpll_rclk_ops, &pf->dplls.rclk);
+ }
+ ice_dpll_release_pins(pin, ICE_DPLL_RCLK_NUM_PER_PF);
+ return ret;
+}
+
+/**
+ * ice_dpll_deinit_pins - deinitialize dpll pins
+ * @pf: board private structure
+ * @cgu: if cgu is controlled by this pf
+ *
+ * Deinitialize the rclk pin and, if the cgu is owned, unregister the directly
+ * connected pins from the dplls. Release the pins' resources back to the dpll
+ * subsystem.
+ */
+static void ice_dpll_deinit_pins(struct ice_pf *pf, bool cgu)
+{
+ struct ice_dpll_pin *outputs = pf->dplls.outputs;
+ struct ice_dpll_pin *inputs = pf->dplls.inputs;
+ int num_outputs = pf->dplls.num_outputs;
+ int num_inputs = pf->dplls.num_inputs;
+ struct ice_dplls *d = &pf->dplls;
+ struct ice_dpll *de = &d->eec;
+ struct ice_dpll *dp = &d->pps;
+
+ ice_dpll_deinit_rclk_pin(pf);
+ if (cgu) {
+ ice_dpll_unregister_pins(dp->dpll, inputs, &ice_dpll_input_ops,
+ num_inputs);
+ ice_dpll_unregister_pins(de->dpll, inputs, &ice_dpll_input_ops,
+ num_inputs);
+ }
+ ice_dpll_release_pins(inputs, num_inputs);
+ if (cgu) {
+ ice_dpll_unregister_pins(dp->dpll, outputs,
+ &ice_dpll_output_ops, num_outputs);
+ ice_dpll_unregister_pins(de->dpll, outputs,
+ &ice_dpll_output_ops, num_outputs);
+ ice_dpll_release_pins(outputs, num_outputs);
+ }
+}
+
+/**
+ * ice_dpll_init_pins - init pins and register pins with a dplls
+ * @pf: board private structure
+ * @cgu: if cgu is present and controlled by this NIC
+ *
+ * Initialize the pf's direct and rclk pins within the pf's dplls in the
+ * Linux dpll subsystem.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - initialization failure reason
+ */
+static int ice_dpll_init_pins(struct ice_pf *pf, bool cgu)
+{
+ u32 rclk_idx;
+ int ret;
+
+ ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.inputs, 0,
+ pf->dplls.num_inputs,
+ &ice_dpll_input_ops,
+ pf->dplls.eec.dpll, pf->dplls.pps.dpll);
+ if (ret)
+ return ret;
+ if (cgu) {
+ ret = ice_dpll_init_direct_pins(pf, cgu, pf->dplls.outputs,
+ pf->dplls.num_inputs,
+ pf->dplls.num_outputs,
+ &ice_dpll_output_ops,
+ pf->dplls.eec.dpll,
+ pf->dplls.pps.dpll);
+ if (ret)
+ goto deinit_inputs;
+ }
+ rclk_idx = pf->dplls.num_inputs + pf->dplls.num_outputs + pf->hw.pf_id;
+ ret = ice_dpll_init_rclk_pins(pf, &pf->dplls.rclk, rclk_idx,
+ &ice_dpll_rclk_ops);
+ if (ret)
+ goto deinit_outputs;
+
+ return 0;
+deinit_outputs:
+ ice_dpll_deinit_direct_pins(cgu, pf->dplls.outputs,
+ pf->dplls.num_outputs,
+ &ice_dpll_output_ops, pf->dplls.pps.dpll,
+ pf->dplls.eec.dpll);
+deinit_inputs:
+ ice_dpll_deinit_direct_pins(cgu, pf->dplls.inputs, pf->dplls.num_inputs,
+ &ice_dpll_input_ops, pf->dplls.pps.dpll,
+ pf->dplls.eec.dpll);
+ return ret;
+}
+
+/**
+ * ice_dpll_deinit_dpll - deinitialize dpll device
+ * @pf: board private structure
+ * @d: pointer to ice_dpll
+ * @cgu: if cgu is present and controlled by this NIC
+ *
+ * If the cgu is owned, unregister the dpll from the dpll subsystem.
+ * Release the dpll device's resources back to the dpll subsystem.
+ */
+static void
+ice_dpll_deinit_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu)
+{
+ if (cgu)
+ dpll_device_unregister(d->dpll, &ice_dpll_ops, d);
+ dpll_device_put(d->dpll);
+}
+
+/**
+ * ice_dpll_init_dpll - initialize dpll device in dpll subsystem
+ * @pf: board private structure
+ * @d: dpll to be initialized
+ * @cgu: if cgu is present and controlled by this NIC
+ * @type: type of dpll being initialized
+ *
+ * Allocate a dpll instance for this board in the dpll subsystem; if the cgu
+ * is controlled by this NIC, register the dpll with the callback ops.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - initialization failure reason
+ */
+static int
+ice_dpll_init_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu,
+ enum dpll_type type)
+{
+ u64 clock_id = pf->dplls.clock_id;
+ int ret;
+
+ d->dpll = dpll_device_get(clock_id, d->dpll_idx, THIS_MODULE);
+ if (IS_ERR(d->dpll)) {
+ ret = PTR_ERR(d->dpll);
+ dev_err(ice_pf_to_dev(pf),
+ "dpll_device_get failed (%p) err=%d\n", d, ret);
+ return ret;
+ }
+ d->pf = pf;
+ if (cgu) {
+ ice_dpll_update_state(pf, d, true);
+ ret = dpll_device_register(d->dpll, type, &ice_dpll_ops, d);
+ if (ret) {
+ dpll_device_put(d->dpll);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_dpll_deinit_worker - deinitialize dpll kworker
+ * @pf: board private structure
+ *
+ * Stop the dpll's kworker and release its resources.
+ */
+static void ice_dpll_deinit_worker(struct ice_pf *pf)
+{
+ struct ice_dplls *d = &pf->dplls;
+
+ kthread_cancel_delayed_work_sync(&d->work);
+ kthread_destroy_worker(d->kworker);
+}
+
+/**
+ * ice_dpll_init_worker - Initialize DPLLs periodic worker
+ * @pf: board private structure
+ *
+ * Create and start DPLLs periodic worker.
+ *
+ * Context: Shall be called after pf->dplls.lock is initialized.
+ * Return:
+ * * 0 - success
+ * * negative - create worker failure
+ */
+static int ice_dpll_init_worker(struct ice_pf *pf)
+{
+ struct ice_dplls *d = &pf->dplls;
+ struct kthread_worker *kworker;
+
+ kthread_init_delayed_work(&d->work, ice_dpll_periodic_work);
+ kworker = kthread_create_worker(0, "ice-dplls-%s",
+ dev_name(ice_pf_to_dev(pf)));
+ if (IS_ERR(kworker))
+ return PTR_ERR(kworker);
+ d->kworker = kworker;
+ d->cgu_state_acq_err_num = 0;
+ kthread_queue_delayed_work(d->kworker, &d->work, 0);
+
+ return 0;
+}
+
+/**
+ * ice_dpll_init_info_direct_pins - initializes direct pins info
+ * @pf: board private structure
+ * @pin_type: type of pins being initialized
+ *
+ * Init information for directly connected pins and cache it in the pf's pin
+ * structures.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - init failure reason
+ */
+static int
+ice_dpll_init_info_direct_pins(struct ice_pf *pf,
+ enum ice_dpll_pin_type pin_type)
+{
+ struct ice_dpll *de = &pf->dplls.eec, *dp = &pf->dplls.pps;
+ int num_pins, i, ret = -EINVAL;
+ struct ice_hw *hw = &pf->hw;
+ struct ice_dpll_pin *pins;
+ unsigned long caps;
+ u8 freq_supp_num;
+ bool input;
+
+ switch (pin_type) {
+ case ICE_DPLL_PIN_TYPE_INPUT:
+ pins = pf->dplls.inputs;
+ num_pins = pf->dplls.num_inputs;
+ input = true;
+ break;
+ case ICE_DPLL_PIN_TYPE_OUTPUT:
+ pins = pf->dplls.outputs;
+ num_pins = pf->dplls.num_outputs;
+ input = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = 0; i < num_pins; i++) {
+ caps = 0;
+ pins[i].idx = i;
+ pins[i].prop.board_label = ice_cgu_get_pin_name(hw, i, input);
+ pins[i].prop.type = ice_cgu_get_pin_type(hw, i, input);
+ if (input) {
+ ret = ice_aq_get_cgu_ref_prio(hw, de->dpll_idx, i,
+ &de->input_prio[i]);
+ if (ret)
+ return ret;
+ ret = ice_aq_get_cgu_ref_prio(hw, dp->dpll_idx, i,
+ &dp->input_prio[i]);
+ if (ret)
+ return ret;
+ caps |= (DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
+ DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE);
+ /* keep phase_range.min <= phase_range.max so requests validate */
+ pins[i].prop.phase_range.min =
+ -pf->dplls.input_phase_adj_max;
+ pins[i].prop.phase_range.max =
+ pf->dplls.input_phase_adj_max;
+ } else {
+ pins[i].prop.phase_range.min =
+ -pf->dplls.output_phase_adj_max;
+ pins[i].prop.phase_range.max =
+ pf->dplls.output_phase_adj_max;
+ ret = ice_cgu_get_output_pin_state_caps(hw, i, &caps);
+ if (ret)
+ return ret;
+ }
+ pins[i].prop.capabilities = caps;
+ ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL);
+ if (ret)
+ return ret;
+ pins[i].prop.freq_supported =
+ ice_cgu_get_pin_freq_supp(hw, i, input, &freq_supp_num);
+ pins[i].prop.freq_supported_num = freq_supp_num;
+ pins[i].pf = pf;
+ }
+
+ return ret;
+}
+
+/**
+ * ice_dpll_init_info_rclk_pin - initializes rclk pin information
+ * @pf: board private structure
+ *
+ * Init information for rclk pin, cache them in pf->dplls.rclk.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - init failure reason
+ */
+static int ice_dpll_init_info_rclk_pin(struct ice_pf *pf)
+{
+ struct ice_dpll_pin *pin = &pf->dplls.rclk;
+
+ pin->prop.type = DPLL_PIN_TYPE_SYNCE_ETH_PORT;
+ pin->prop.capabilities |= DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
+ pin->pf = pf;
+
+ return ice_dpll_pin_state_update(pf, pin,
+ ICE_DPLL_PIN_TYPE_RCLK_INPUT, NULL);
+}
+
+/**
+ * ice_dpll_init_pins_info - init pins info wrapper
+ * @pf: board private structure
+ * @pin_type: type of pins being initialized
+ *
+ * Wraps functions for pin initialization.
+ *
+ * Return:
+ * * 0 - success
+ * * negative - init failure reason
+ */
+static int
+ice_dpll_init_pins_info(struct ice_pf *pf, enum ice_dpll_pin_type pin_type)
+{
+ switch (pin_type) {
+ case ICE_DPLL_PIN_TYPE_INPUT:
+ case ICE_DPLL_PIN_TYPE_OUTPUT:
+ return ice_dpll_init_info_direct_pins(pf, pin_type);
+ case ICE_DPLL_PIN_TYPE_RCLK_INPUT:
+ return ice_dpll_init_info_rclk_pin(pf);
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * ice_dpll_deinit_info - release memory allocated for pins info
+ * @pf: board private structure
+ *
+ * Release memory allocated for pins by ice_dpll_init_info function.
+ */
+static void ice_dpll_deinit_info(struct ice_pf *pf)
+{
+ kfree(pf->dplls.inputs);
+ kfree(pf->dplls.outputs);
+ kfree(pf->dplls.eec.input_prio);
+ kfree(pf->dplls.pps.input_prio);
+}
+
+/**
+ * ice_dpll_init_info - prepare pf's dpll information structure
+ * @pf: board private structure
+ * @cgu: if cgu is present and controlled by this NIC
+ *
+ * Acquire (from HW) and set basic dpll information (on pf->dplls struct).
+ *
+ * Return:
+ * * 0 - success
+ * * negative - init failure reason
+ */
+static int ice_dpll_init_info(struct ice_pf *pf, bool cgu)
+{
+ struct ice_aqc_get_cgu_abilities abilities;
+ struct ice_dpll *de = &pf->dplls.eec;
+ struct ice_dpll *dp = &pf->dplls.pps;
+ struct ice_dplls *d = &pf->dplls;
+ struct ice_hw *hw = &pf->hw;
+ int ret, alloc_size, i;
+
+ d->clock_id = ice_generate_clock_id(pf);
+ ret = ice_aq_get_cgu_abilities(hw, &abilities);
+ if (ret) {
+ dev_err(ice_pf_to_dev(pf),
+ "err:%d %s failed to read cgu abilities\n",
+ ret, ice_aq_str(hw->adminq.sq_last_status));
+ return ret;
+ }
+
+ de->dpll_idx = abilities.eec_dpll_idx;
+ dp->dpll_idx = abilities.pps_dpll_idx;
+ d->num_inputs = abilities.num_inputs;
+ d->num_outputs = abilities.num_outputs;
+ d->input_phase_adj_max = le32_to_cpu(abilities.max_in_phase_adj);
+ d->output_phase_adj_max = le32_to_cpu(abilities.max_out_phase_adj);
+
+ alloc_size = sizeof(*d->inputs) * d->num_inputs;
+ d->inputs = kzalloc(alloc_size, GFP_KERNEL);
+ if (!d->inputs)
+ return -ENOMEM;
+
+ alloc_size = sizeof(*de->input_prio) * d->num_inputs;
+ de->input_prio = kzalloc(alloc_size, GFP_KERNEL);
+ if (!de->input_prio)
+ return -ENOMEM;
+
+ dp->input_prio = kzalloc(alloc_size, GFP_KERNEL);
+ if (!dp->input_prio)
+ return -ENOMEM;
+
+ ret = ice_dpll_init_pins_info(pf, ICE_DPLL_PIN_TYPE_INPUT);
+ if (ret)
+ goto deinit_info;
+
+ if (cgu) {
+ alloc_size = sizeof(*d->outputs) * d->num_outputs;
+ d->outputs = kzalloc(alloc_size, GFP_KERNEL);
+ if (!d->outputs) {
+ ret = -ENOMEM;
+ goto deinit_info;
+ }
+
+ ret = ice_dpll_init_pins_info(pf, ICE_DPLL_PIN_TYPE_OUTPUT);
+ if (ret)
+ goto deinit_info;
+ }
+
+ ret = ice_get_cgu_rclk_pin_info(&pf->hw, &d->base_rclk_idx,
+ &pf->dplls.rclk.num_parents);
+ if (ret)
+ goto deinit_info;
+ for (i = 0; i < pf->dplls.rclk.num_parents; i++)
+ pf->dplls.rclk.parent_idx[i] = d->base_rclk_idx + i;
+ ret = ice_dpll_init_pins_info(pf, ICE_DPLL_PIN_TYPE_RCLK_INPUT);
+ if (ret)
+ goto deinit_info;
+ de->mode = DPLL_MODE_AUTOMATIC;
+ dp->mode = DPLL_MODE_AUTOMATIC;
+
+ dev_dbg(ice_pf_to_dev(pf),
+ "%s - success, inputs:%u, outputs:%u rclk-parents:%u\n",
+ __func__, d->num_inputs, d->num_outputs, d->rclk.num_parents);
+
+ return 0;
+
+deinit_info:
+ dev_err(ice_pf_to_dev(pf),
+ "%s - fail: d->inputs:%p, de->input_prio:%p, dp->input_prio:%p, d->outputs:%p\n",
+ __func__, d->inputs, de->input_prio,
+ dp->input_prio, d->outputs);
+ ice_dpll_deinit_info(pf);
+ return ret;
+}
+
+/**
+ * ice_dpll_deinit - disable the driver/HW support for the dpll subsystem
+ * @pf: board private structure
+ *
+ * Handles the cleanup work required after dpll initialization, freeing
+ * resources and unregistering the dpll, pin and all resources used for
+ * handling them.
+ *
+ * Context: Destroys pf->dplls.lock mutex. Call only if ICE_FLAG_DPLL was set.
+ */
+void ice_dpll_deinit(struct ice_pf *pf)
+{
+ bool cgu = ice_is_feature_supported(pf, ICE_F_CGU);
+
+ clear_bit(ICE_FLAG_DPLL, pf->flags);
+ if (cgu)
+ ice_dpll_deinit_worker(pf);
+
+ ice_dpll_deinit_pins(pf, cgu);
+ ice_dpll_deinit_dpll(pf, &pf->dplls.pps, cgu);
+ ice_dpll_deinit_dpll(pf, &pf->dplls.eec, cgu);
+ ice_dpll_deinit_info(pf);
+ mutex_destroy(&pf->dplls.lock);
+}
+
+/**
+ * ice_dpll_init - initialize support for dpll subsystem
+ * @pf: board private structure
+ *
+ * Set up the device dplls, then register them and their connected pins within
+ * the Linux dpll subsystem. Allow userspace to obtain the state of the DPLLs
+ * and to handle DPLL configuration requests.
+ *
+ * Context: Initializes pf->dplls.lock mutex.
+ */
+void ice_dpll_init(struct ice_pf *pf)
+{
+ bool cgu = ice_is_feature_supported(pf, ICE_F_CGU);
+ struct ice_dplls *d = &pf->dplls;
+ int err = 0;
+
+ mutex_init(&d->lock);
+ err = ice_dpll_init_info(pf, cgu);
+ if (err)
+ goto err_exit;
+ err = ice_dpll_init_dpll(pf, &pf->dplls.eec, cgu, DPLL_TYPE_EEC);
+ if (err)
+ goto deinit_info;
+ err = ice_dpll_init_dpll(pf, &pf->dplls.pps, cgu, DPLL_TYPE_PPS);
+ if (err)
+ goto deinit_eec;
+ err = ice_dpll_init_pins(pf, cgu);
+ if (err)
+ goto deinit_pps;
+ if (cgu) {
+ err = ice_dpll_init_worker(pf);
+ if (err)
+ goto deinit_pins;
+ }
+ set_bit(ICE_FLAG_DPLL, pf->flags);
+
+ return;
+
+deinit_pins:
+ ice_dpll_deinit_pins(pf, cgu);
+deinit_pps:
+ ice_dpll_deinit_dpll(pf, &pf->dplls.pps, cgu);
+deinit_eec:
+ ice_dpll_deinit_dpll(pf, &pf->dplls.eec, cgu);
+deinit_info:
+ ice_dpll_deinit_info(pf);
+err_exit:
+ mutex_destroy(&d->lock);
+ dev_warn(ice_pf_to_dev(pf), "DPLLs init failure err:%d\n", err);
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_dpll.h b/drivers/net/ethernet/intel/ice/ice_dpll.h
new file mode 100644
index 000000000000..93172e93995b
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_dpll.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2022, Intel Corporation. */
+
+#ifndef _ICE_DPLL_H_
+#define _ICE_DPLL_H_
+
+#include "ice.h"
+
+#define ICE_DPLL_RCLK_NUM_MAX 4
+
+/** struct ice_dpll_pin - store info about pins
+ * @pin: dpll pin structure
+ * @pf: pointer to pf, which has registered the dpll_pin
+ * @idx: ice pin private idx
+ * @num_parents: holds number of parent pins
+ * @parent_idx: holds indexes of parent pins
+ * @flags: pin flags returned from HW
+ * @state: state of a pin
+ * @prop: pin properties
+ * @freq: current frequency of a pin
+ * @phase_adjust: current phase adjust value
+ */
+struct ice_dpll_pin {
+ struct dpll_pin *pin;
+ struct ice_pf *pf;
+ u8 idx;
+ u8 num_parents;
+ u8 parent_idx[ICE_DPLL_RCLK_NUM_MAX];
+ u8 flags[ICE_DPLL_RCLK_NUM_MAX];
+ u8 state[ICE_DPLL_RCLK_NUM_MAX];
+ struct dpll_pin_properties prop;
+ u32 freq;
+ s32 phase_adjust;
+};
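+/* Editorial note: the @flags/@state arrays are indexed per pin type -
+ * input pins use one entry per dpll (state[dpll_idx]), output pins use
+ * entry 0 only, and the rclk pin uses one entry per parent pin
+ * (state[parent_idx]), hence the ICE_DPLL_RCLK_NUM_MAX sizing.
+ */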
+
+/** struct ice_dpll - store info required for DPLL control
+ * @dpll: pointer to dpll dev
+ * @pf: pointer to pf, which has registered the dpll_device
+ * @dpll_idx: index of dpll on the NIC
+ * @input_idx: currently selected input index
+ * @prev_input_idx: previously selected input index
+ * @ref_state: state of dpll reference signals
+ * @eec_mode: eec_mode dpll is configured for
+ * @phase_offset: phase offset of active pin vs dpll signal
+ * @prev_phase_offset: previous phase offset of active pin vs dpll signal
+ * @input_prio: priorities of each input
+ * @dpll_state: current dpll sync state
+ * @prev_dpll_state: last dpll sync state
+ * @mode: currently selected dpll working mode
+ * @active_input: pointer to active input pin
+ * @prev_input: pointer to previous active input pin
+ */
+struct ice_dpll {
+ struct dpll_device *dpll;
+ struct ice_pf *pf;
+ u8 dpll_idx;
+ u8 input_idx;
+ u8 prev_input_idx;
+ u8 ref_state;
+ u8 eec_mode;
+ s64 phase_offset;
+ s64 prev_phase_offset;
+ u8 *input_prio;
+ enum dpll_lock_status dpll_state;
+ enum dpll_lock_status prev_dpll_state;
+ enum dpll_mode mode;
+ struct dpll_pin *active_input;
+ struct dpll_pin *prev_input;
+};
+
+/** struct ice_dplls - store info required for CGU (clock generation unit)
+ * @kworker: periodic worker
+ * @work: periodic work
+ * @lock: locks access to configuration of a dpll
+ * @eec: pointer to EEC dpll dev
+ * @pps: pointer to PPS dpll dev
+ * @inputs: input pins pointer
+ * @outputs: output pins pointer
+ * @rclk: recovered pins pointer
+ * @num_inputs: number of input pins available on dpll
+ * @num_outputs: number of output pins available on dpll
+ * @cgu_state_acq_err_num: number of errors returned during periodic work
+ * @base_rclk_idx: idx of first pin used for clock recovery pins
+ * @clock_id: clock_id of dplls
+ * @input_phase_adj_max: max phase adjust value for input pins
+ * @output_phase_adj_max: max phase adjust value for output pins
+ */
+struct ice_dplls {
+ struct kthread_worker *kworker;
+ struct kthread_delayed_work work;
+ struct mutex lock;
+ struct ice_dpll eec;
+ struct ice_dpll pps;
+ struct ice_dpll_pin *inputs;
+ struct ice_dpll_pin *outputs;
+ struct ice_dpll_pin rclk;
+ u8 num_inputs;
+ u8 num_outputs;
+ int cgu_state_acq_err_num;
+ u8 base_rclk_idx;
+ u64 clock_id;
+ s32 input_phase_adj_max;
+ s32 output_phase_adj_max;
+};
+
+#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+void ice_dpll_init(struct ice_pf *pf);
+void ice_dpll_deinit(struct ice_pf *pf);
+#else
+static inline void ice_dpll_init(struct ice_pf *pf) { }
+static inline void ice_dpll_deinit(struct ice_pf *pf) { }
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index 8f232c41a89e..a655d499abfa 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -4,6 +4,7 @@
#include "ice.h"
#include "ice_lib.h"
#include "ice_eswitch.h"
+#include "ice_eswitch_br.h"
#include "ice_fltr.h"
#include "ice_repr.h"
#include "ice_devlink.h"
@@ -83,10 +84,6 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
struct ice_vsi_vlan_ops *vlan_ops;
bool rule_added = false;
- vlan_ops = ice_get_compat_vsi_vlan_ops(ctrl_vsi);
- if (vlan_ops->dis_stripping(ctrl_vsi))
- return -ENODEV;
-
ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);
netif_addr_lock_bh(uplink_netdev);
@@ -103,17 +100,28 @@ static int ice_eswitch_setup_env(struct ice_pf *pf)
rule_added = true;
}
+ vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
+ if (vlan_ops->dis_rx_filtering(uplink_vsi))
+ goto err_dis_rx;
+
if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
goto err_override_uplink;
if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
goto err_override_control;
+ if (ice_vsi_update_local_lb(uplink_vsi, true))
+ goto err_override_local_lb;
+
return 0;
+err_override_local_lb:
+ ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
err_override_control:
ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
err_override_uplink:
+ vlan_ops->ena_rx_filtering(uplink_vsi);
+err_dis_rx:
if (rule_added)
ice_clear_dflt_vsi(uplink_vsi);
err_def_rx:
@@ -306,6 +314,9 @@ void ice_eswitch_update_repr(struct ice_vsi *vsi)
repr->src_vsi = vsi;
repr->dst->u.port_info.port_id = vsi->vsi_num;
+ if (repr->br_port)
+ repr->br_port->vsi = vsi;
+
ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
if (ret) {
ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr, ICE_FWD_TO_VSI);
@@ -331,6 +342,9 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
np = netdev_priv(netdev);
vsi = np->vsi;
+ if (!vsi || !ice_is_switchdev_running(vsi->back))
+ return NETDEV_TX_BUSY;
+
if (ice_is_reset_in_progress(vsi->back->state) ||
test_bit(ICE_VF_DIS, vsi->back->state))
return NETDEV_TX_BUSY;
@@ -378,9 +392,14 @@ static void ice_eswitch_release_env(struct ice_pf *pf)
{
struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ struct ice_vsi_vlan_ops *vlan_ops;
+ vlan_ops = ice_get_compat_vsi_vlan_ops(uplink_vsi);
+
+ ice_vsi_update_local_lb(uplink_vsi, false);
ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
+ vlan_ops->ena_rx_filtering(uplink_vsi);
ice_clear_dflt_vsi(uplink_vsi);
ice_fltr_add_mac_and_broadcast(uplink_vsi,
uplink_vsi->port_info->mac.perm_addr,
@@ -455,16 +474,24 @@ static void ice_eswitch_napi_disable(struct ice_pf *pf)
*/
static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
{
- struct ice_vsi *ctrl_vsi;
+ struct ice_vsi *ctrl_vsi, *uplink_vsi;
+
+ uplink_vsi = ice_get_main_vsi(pf);
+ if (!uplink_vsi)
+ return -ENODEV;
+
+ if (netif_is_any_bridge_port(uplink_vsi->netdev)) {
+ dev_err(ice_pf_to_dev(pf),
+ "Uplink port cannot be a bridge port\n");
+ return -EINVAL;
+ }
pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
if (!pf->switchdev.control_vsi)
return -ENODEV;
ctrl_vsi = pf->switchdev.control_vsi;
- pf->switchdev.uplink_vsi = ice_get_main_vsi(pf);
- if (!pf->switchdev.uplink_vsi)
- goto err_vsi;
+ pf->switchdev.uplink_vsi = uplink_vsi;
if (ice_eswitch_setup_env(pf))
goto err_vsi;
@@ -480,10 +507,15 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
if (ice_vsi_open(ctrl_vsi))
goto err_setup_reprs;
+ if (ice_eswitch_br_offloads_init(pf))
+ goto err_br_offloads;
+
ice_eswitch_napi_enable(pf);
return 0;
+err_br_offloads:
+ ice_vsi_close(ctrl_vsi);
err_setup_reprs:
ice_repr_rem_from_all_vfs(pf);
err_repr_add:
@@ -502,8 +534,8 @@ static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
ice_eswitch_napi_disable(pf);
+ ice_eswitch_br_offloads_deinit(pf);
ice_eswitch_release_env(pf);
- ice_rem_adv_rule_for_vsi(&pf->hw, ctrl_vsi->idx);
ice_eswitch_release_reprs(pf, ctrl_vsi);
ice_vsi_release(ctrl_vsi);
ice_repr_rem_from_all_vfs(pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
new file mode 100644
index 000000000000..6ae0269bdf73
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c
@@ -0,0 +1,1346 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2023, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_eswitch_br.h"
+#include "ice_repr.h"
+#include "ice_switch.h"
+#include "ice_vlan.h"
+#include "ice_vf_vsi_vlan_ops.h"
+#include "ice_trace.h"
+
+#define ICE_ESW_BRIDGE_UPDATE_INTERVAL msecs_to_jiffies(1000)
+
+static const struct rhashtable_params ice_fdb_ht_params = {
+ .key_offset = offsetof(struct ice_esw_br_fdb_entry, data),
+ .key_len = sizeof(struct ice_esw_br_fdb_data),
+ .head_offset = offsetof(struct ice_esw_br_fdb_entry, ht_node),
+ .automatic_shrinking = true,
+};
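+/* Sketch (editorial; the bridge struct field name is assumed): with the
+ * params above an FDB entry is looked up by its ice_esw_br_fdb_data key,
+ * e.g.:
+ *
+ *   struct ice_esw_br_fdb_entry *entry;
+ *
+ *   entry = rhashtable_lookup_fast(&bridge->fdb_ht, &data,
+ *                                  ice_fdb_ht_params);
+ */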
+
+static bool ice_eswitch_br_is_dev_valid(const struct net_device *dev)
+{
+ /* Accept only PF netdev, port representors (PRs) and LAG masters */
+ return ice_is_port_repr_netdev(dev) || netif_is_ice(dev) ||
+ netif_is_lag_master(dev);
+}
+
+static struct net_device *
+ice_eswitch_br_get_uplink_from_lag(struct net_device *lag_dev)
+{
+ struct net_device *lower;
+ struct list_head *iter;
+
+ netdev_for_each_lower_dev(lag_dev, lower, iter) {
+ if (netif_is_ice(lower))
+ return lower;
+ }
+
+ return NULL;
+}
+
+static struct ice_esw_br_port *
+ice_eswitch_br_netdev_to_port(struct net_device *dev)
+{
+ if (ice_is_port_repr_netdev(dev)) {
+ struct ice_repr *repr = ice_netdev_to_repr(dev);
+
+ return repr->br_port;
+ } else if (netif_is_ice(dev) || netif_is_lag_master(dev)) {
+ struct net_device *ice_dev;
+ struct ice_pf *pf;
+
+ if (netif_is_lag_master(dev))
+ ice_dev = ice_eswitch_br_get_uplink_from_lag(dev);
+ else
+ ice_dev = dev;
+
+ if (!ice_dev)
+ return NULL;
+
+ pf = ice_netdev_to_pf(ice_dev);
+
+ return pf->br_port;
+ }
+
+ return NULL;
+}
+
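+/* Ingress rules match frames received from the wire (Rx on the uplink,
+ * src == PF id) and forward them to the VF VSI that owns the destination
+ * MAC; egress rules match the Tx path towards the uplink and enable LAN
+ * transmit on the uplink VSI.
+ */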
+static void
+ice_eswitch_br_ingress_rule_setup(struct ice_adv_rule_info *rule_info,
+ u8 pf_id, u16 vf_vsi_idx)
+{
+ rule_info->sw_act.vsi_handle = vf_vsi_idx;
+ rule_info->sw_act.flag |= ICE_FLTR_RX;
+ rule_info->sw_act.src = pf_id;
+ rule_info->priority = 2;
+}
+
+static void
+ice_eswitch_br_egress_rule_setup(struct ice_adv_rule_info *rule_info,
+ u16 pf_vsi_idx)
+{
+ rule_info->sw_act.vsi_handle = pf_vsi_idx;
+ rule_info->sw_act.flag |= ICE_FLTR_TX;
+ rule_info->flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
+ rule_info->flags_info.act_valid = true;
+ rule_info->priority = 2;
+}
+
+static int
+ice_eswitch_br_rule_delete(struct ice_hw *hw, struct ice_rule_query_data *rule)
+{
+ int err;
+
+ if (!rule)
+ return -EINVAL;
+
+ err = ice_rem_adv_rule_by_id(hw, rule);
+ kfree(rule);
+
+ return err;
+}
+
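+/* Rules match on the destination or source MAC, plus an outer VLAN
+ * header when the VID is valid (see ice_eswitch_br_is_vid_valid())
+ */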
+static u16
+ice_eswitch_br_get_lkups_cnt(u16 vid)
+{
+ return ice_eswitch_br_is_vid_valid(vid) ? 2 : 1;
+}
+
+static void
+ice_eswitch_br_add_vlan_lkup(struct ice_adv_lkup_elem *list, u16 vid)
+{
+ if (ice_eswitch_br_is_vid_valid(vid)) {
+ list[1].type = ICE_VLAN_OFOS;
+ list[1].h_u.vlan_hdr.vlan = cpu_to_be16(vid & VLAN_VID_MASK);
+ list[1].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
+ }
+}
+
+static struct ice_rule_query_data *
+ice_eswitch_br_fwd_rule_create(struct ice_hw *hw, int vsi_idx, int port_type,
+ const unsigned char *mac, u16 vid)
+{
+ struct ice_adv_rule_info rule_info = { 0 };
+ struct ice_rule_query_data *rule;
+ struct ice_adv_lkup_elem *list;
+ u16 lkups_cnt;
+ int err;
+
+ lkups_cnt = ice_eswitch_br_get_lkups_cnt(vid);
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return ERR_PTR(-ENOMEM);
+
+ list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
+ if (!list) {
+ err = -ENOMEM;
+ goto err_list_alloc;
+ }
+
+ switch (port_type) {
+ case ICE_ESWITCH_BR_UPLINK_PORT:
+ ice_eswitch_br_egress_rule_setup(&rule_info, vsi_idx);
+ break;
+ case ICE_ESWITCH_BR_VF_REPR_PORT:
+ ice_eswitch_br_ingress_rule_setup(&rule_info, hw->pf_id,
+ vsi_idx);
+ break;
+ default:
+ err = -EINVAL;
+ goto err_add_rule;
+ }
+
+ list[0].type = ICE_MAC_OFOS;
+ ether_addr_copy(list[0].h_u.eth_hdr.dst_addr, mac);
+ eth_broadcast_addr(list[0].m_u.eth_hdr.dst_addr);
+
+ ice_eswitch_br_add_vlan_lkup(list, vid);
+
+ rule_info.need_pass_l2 = true;
+
+ rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
+
+ err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, rule);
+ if (err)
+ goto err_add_rule;
+
+ kfree(list);
+
+ return rule;
+
+err_add_rule:
+ kfree(list);
+err_list_alloc:
+ kfree(rule);
+
+ return ERR_PTR(err);
+}
+
+static struct ice_rule_query_data *
+ice_eswitch_br_guard_rule_create(struct ice_hw *hw, u16 vsi_idx,
+ const unsigned char *mac, u16 vid)
+{
+ struct ice_adv_rule_info rule_info = { 0 };
+ struct ice_rule_query_data *rule;
+ struct ice_adv_lkup_elem *list;
+ int err = -ENOMEM;
+ u16 lkups_cnt;
+
+ lkups_cnt = ice_eswitch_br_get_lkups_cnt(vid);
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ goto err_exit;
+
+ list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
+ if (!list)
+ goto err_list_alloc;
+
+ list[0].type = ICE_MAC_OFOS;
+ ether_addr_copy(list[0].h_u.eth_hdr.src_addr, mac);
+ eth_broadcast_addr(list[0].m_u.eth_hdr.src_addr);
+
+ ice_eswitch_br_add_vlan_lkup(list, vid);
+
+ rule_info.allow_pass_l2 = true;
+ rule_info.sw_act.vsi_handle = vsi_idx;
+ rule_info.sw_act.fltr_act = ICE_NOP;
+ rule_info.priority = 2;
+
+ err = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, rule);
+ if (err)
+ goto err_add_rule;
+
+ kfree(list);
+
+ return rule;
+
+err_add_rule:
+ kfree(list);
+err_list_alloc:
+ kfree(rule);
+err_exit:
+ return ERR_PTR(err);
+}
+
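+/* Each offloaded FDB entry is backed by a pair of advanced rules: a
+ * forward rule that matches the destination MAC (and VID, if valid) and
+ * redirects to the target VSI, and a guard rule that matches the source
+ * MAC and lets such frames pass L2 processing without being re-forwarded.
+ */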
+static struct ice_esw_br_flow *
+ice_eswitch_br_flow_create(struct device *dev, struct ice_hw *hw, int vsi_idx,
+ int port_type, const unsigned char *mac, u16 vid)
+{
+ struct ice_rule_query_data *fwd_rule, *guard_rule;
+ struct ice_esw_br_flow *flow;
+ int err;
+
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+ if (!flow)
+ return ERR_PTR(-ENOMEM);
+
+ fwd_rule = ice_eswitch_br_fwd_rule_create(hw, vsi_idx, port_type, mac,
+ vid);
+ err = PTR_ERR_OR_ZERO(fwd_rule);
+ if (err) {
+ dev_err(dev, "Failed to create eswitch bridge %sgress forward rule, err: %d\n",
+ port_type == ICE_ESWITCH_BR_UPLINK_PORT ? "e" : "in",
+ err);
+ goto err_fwd_rule;
+ }
+
+ guard_rule = ice_eswitch_br_guard_rule_create(hw, vsi_idx, mac, vid);
+ err = PTR_ERR_OR_ZERO(guard_rule);
+ if (err) {
+ dev_err(dev, "Failed to create eswitch bridge %sgress guard rule, err: %d\n",
+ port_type == ICE_ESWITCH_BR_UPLINK_PORT ? "e" : "in",
+ err);
+ goto err_guard_rule;
+ }
+
+ flow->fwd_rule = fwd_rule;
+ flow->guard_rule = guard_rule;
+
+ return flow;
+
+err_guard_rule:
+ ice_eswitch_br_rule_delete(hw, fwd_rule);
+err_fwd_rule:
+ kfree(flow);
+
+ return ERR_PTR(err);
+}
+
+static struct ice_esw_br_fdb_entry *
+ice_eswitch_br_fdb_find(struct ice_esw_br *bridge, const unsigned char *mac,
+ u16 vid)
+{
+ struct ice_esw_br_fdb_data data = {
+ .vid = vid,
+ };
+
+ ether_addr_copy(data.addr, mac);
+ return rhashtable_lookup_fast(&bridge->fdb_ht, &data,
+ ice_fdb_ht_params);
+}
+
+static void
+ice_eswitch_br_flow_delete(struct ice_pf *pf, struct ice_esw_br_flow *flow)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ err = ice_eswitch_br_rule_delete(&pf->hw, flow->fwd_rule);
+ if (err)
+ dev_err(dev, "Failed to delete FDB forward rule, err: %d\n",
+ err);
+
+ err = ice_eswitch_br_rule_delete(&pf->hw, flow->guard_rule);
+ if (err)
+ dev_err(dev, "Failed to delete FDB guard rule, err: %d\n",
+ err);
+
+ kfree(flow);
+}
+
+static struct ice_esw_br_vlan *
+ice_esw_br_port_vlan_lookup(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid)
+{
+ struct ice_pf *pf = bridge->br_offloads->pf;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_esw_br_port *port;
+ struct ice_esw_br_vlan *vlan;
+
+ port = xa_load(&bridge->ports, vsi_idx);
+ if (!port) {
+ dev_info(dev, "Bridge port lookup failed (vsi=%u)\n", vsi_idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ vlan = xa_load(&port->vlans, vid);
+ if (!vlan) {
+ dev_info(dev, "Bridge port vlan metadata lookup failed (vsi=%u)\n",
+ vsi_idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return vlan;
+}
+
+static void
+ice_eswitch_br_fdb_entry_delete(struct ice_esw_br *bridge,
+ struct ice_esw_br_fdb_entry *fdb_entry)
+{
+ struct ice_pf *pf = bridge->br_offloads->pf;
+
+ rhashtable_remove_fast(&bridge->fdb_ht, &fdb_entry->ht_node,
+ ice_fdb_ht_params);
+ list_del(&fdb_entry->list);
+
+ ice_eswitch_br_flow_delete(pf, fdb_entry->flow);
+
+ kfree(fdb_entry);
+}
+
+static void
+ice_eswitch_br_fdb_offload_notify(struct net_device *dev,
+ const unsigned char *mac, u16 vid,
+ unsigned long val)
+{
+ struct switchdev_notifier_fdb_info fdb_info = {
+ .addr = mac,
+ .vid = vid,
+ .offloaded = true,
+ };
+
+ call_switchdev_notifiers(val, dev, &fdb_info.info, NULL);
+}
+
+static void
+ice_eswitch_br_fdb_entry_notify_and_cleanup(struct ice_esw_br *bridge,
+ struct ice_esw_br_fdb_entry *entry)
+{
+ if (!(entry->flags & ICE_ESWITCH_BR_FDB_ADDED_BY_USER))
+ ice_eswitch_br_fdb_offload_notify(entry->dev, entry->data.addr,
+ entry->data.vid,
+ SWITCHDEV_FDB_DEL_TO_BRIDGE);
+ ice_eswitch_br_fdb_entry_delete(bridge, entry);
+}
+
+static void
+ice_eswitch_br_fdb_entry_find_and_delete(struct ice_esw_br *bridge,
+ const unsigned char *mac, u16 vid)
+{
+ struct ice_pf *pf = bridge->br_offloads->pf;
+ struct ice_esw_br_fdb_entry *fdb_entry;
+ struct device *dev = ice_pf_to_dev(pf);
+
+ fdb_entry = ice_eswitch_br_fdb_find(bridge, mac, vid);
+ if (!fdb_entry) {
+ dev_err(dev, "FDB entry with mac: %pM and vid: %u not found\n",
+ mac, vid);
+ return;
+ }
+
+ trace_ice_eswitch_br_fdb_entry_find_and_delete(fdb_entry);
+ ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, fdb_entry);
+}
+
+static void
+ice_eswitch_br_fdb_entry_create(struct net_device *netdev,
+ struct ice_esw_br_port *br_port,
+ bool added_by_user,
+ const unsigned char *mac, u16 vid)
+{
+ struct ice_esw_br *bridge = br_port->bridge;
+ struct ice_pf *pf = bridge->br_offloads->pf;
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_esw_br_fdb_entry *fdb_entry;
+ struct ice_esw_br_flow *flow;
+ struct ice_esw_br_vlan *vlan;
+ struct ice_hw *hw = &pf->hw;
+ unsigned long event;
+ int err;
+
+ /* VLAN-tagged entries can only be offloaded when VLAN filtering
+ * is enabled on the bridge
+ */
+ if (!(bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING) && vid)
+ return;
+
+ if (bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING) {
+ vlan = ice_esw_br_port_vlan_lookup(bridge, br_port->vsi_idx,
+ vid);
+ if (IS_ERR(vlan)) {
+ dev_err(dev, "VLAN lookup failed, err: %ld\n",
+ PTR_ERR(vlan));
+ return;
+ }
+ }
+
+ fdb_entry = ice_eswitch_br_fdb_find(bridge, mac, vid);
+ if (fdb_entry)
+ ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, fdb_entry);
+
+ fdb_entry = kzalloc(sizeof(*fdb_entry), GFP_KERNEL);
+ if (!fdb_entry) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ flow = ice_eswitch_br_flow_create(dev, hw, br_port->vsi_idx,
+ br_port->type, mac, vid);
+ if (IS_ERR(flow)) {
+ err = PTR_ERR(flow);
+ goto err_add_flow;
+ }
+
+ ether_addr_copy(fdb_entry->data.addr, mac);
+ fdb_entry->data.vid = vid;
+ fdb_entry->br_port = br_port;
+ fdb_entry->flow = flow;
+ fdb_entry->dev = netdev;
+ fdb_entry->last_use = jiffies;
+ event = SWITCHDEV_FDB_ADD_TO_BRIDGE;
+
+ if (added_by_user) {
+ fdb_entry->flags |= ICE_ESWITCH_BR_FDB_ADDED_BY_USER;
+ event = SWITCHDEV_FDB_OFFLOADED;
+ }
+
+ err = rhashtable_insert_fast(&bridge->fdb_ht, &fdb_entry->ht_node,
+ ice_fdb_ht_params);
+ if (err)
+ goto err_fdb_insert;
+
+ list_add(&fdb_entry->list, &bridge->fdb_list);
+ trace_ice_eswitch_br_fdb_entry_create(fdb_entry);
+
+ ice_eswitch_br_fdb_offload_notify(netdev, mac, vid, event);
+
+ return;
+
+err_fdb_insert:
+ ice_eswitch_br_flow_delete(pf, flow);
+err_add_flow:
+ kfree(fdb_entry);
+err_exit:
+ dev_err(dev, "Failed to create fdb entry, err: %d\n", err);
+}
+
+static void
+ice_eswitch_br_fdb_work_dealloc(struct ice_esw_br_fdb_work *fdb_work)
+{
+ kfree(fdb_work->fdb_info.addr);
+ kfree(fdb_work);
+}
+
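+/* FDB notifications arrive in atomic context; the actual rule
+ * programming is deferred to the ordered bridge workqueue and runs
+ * under rtnl.
+ */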
+static void
+ice_eswitch_br_fdb_event_work(struct work_struct *work)
+{
+ struct ice_esw_br_fdb_work *fdb_work = ice_work_to_fdb_work(work);
+ bool added_by_user = fdb_work->fdb_info.added_by_user;
+ const unsigned char *mac = fdb_work->fdb_info.addr;
+ u16 vid = fdb_work->fdb_info.vid;
+ struct ice_esw_br_port *br_port;
+
+ rtnl_lock();
+
+ br_port = ice_eswitch_br_netdev_to_port(fdb_work->dev);
+ if (!br_port)
+ goto err_exit;
+
+ switch (fdb_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ ice_eswitch_br_fdb_entry_create(fdb_work->dev, br_port,
+ added_by_user, mac, vid);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ ice_eswitch_br_fdb_entry_find_and_delete(br_port->bridge,
+ mac, vid);
+ break;
+ default:
+ goto err_exit;
+ }
+
+err_exit:
+ rtnl_unlock();
+ dev_put(fdb_work->dev);
+ ice_eswitch_br_fdb_work_dealloc(fdb_work);
+}
+
+static struct ice_esw_br_fdb_work *
+ice_eswitch_br_fdb_work_alloc(struct switchdev_notifier_fdb_info *fdb_info,
+ struct net_device *dev,
+ unsigned long event)
+{
+ struct ice_esw_br_fdb_work *work;
+ unsigned char *mac;
+
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_WORK(&work->work, ice_eswitch_br_fdb_event_work);
+ memcpy(&work->fdb_info, fdb_info, sizeof(work->fdb_info));
+
+ mac = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!mac) {
+ kfree(work);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ether_addr_copy(mac, fdb_info->addr);
+ work->fdb_info.addr = mac;
+ work->event = event;
+ work->dev = dev;
+
+ return work;
+}
+
+static int
+ice_eswitch_br_switchdev_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct switchdev_notifier_fdb_info *fdb_info;
+ struct switchdev_notifier_info *info = ptr;
+ struct ice_esw_br_offloads *br_offloads;
+ struct ice_esw_br_fdb_work *work;
+ struct netlink_ext_ack *extack;
+ struct net_device *upper;
+
+ br_offloads = ice_nb_to_br_offloads(nb, switchdev_nb);
+ extack = switchdev_notifier_info_to_extack(ptr);
+
+ upper = netdev_master_upper_dev_get_rcu(dev);
+ if (!upper)
+ return NOTIFY_DONE;
+
+ if (!netif_is_bridge_master(upper))
+ return NOTIFY_DONE;
+
+ if (!ice_eswitch_br_is_dev_valid(dev))
+ return NOTIFY_DONE;
+
+ if (!ice_eswitch_br_netdev_to_port(dev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb_info = container_of(info, typeof(*fdb_info), info);
+
+ work = ice_eswitch_br_fdb_work_alloc(fdb_info, dev, event);
+ if (IS_ERR(work)) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to init switchdev fdb work");
+ return notifier_from_errno(PTR_ERR(work));
+ }
+ dev_hold(dev);
+
+ queue_work(br_offloads->wq, &work->work);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static void ice_eswitch_br_fdb_flush(struct ice_esw_br *bridge)
+{
+ struct ice_esw_br_fdb_entry *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
+ ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry);
+}
+
+static void
+ice_eswitch_br_vlan_filtering_set(struct ice_esw_br *bridge, bool enable)
+{
+ if (enable == !!(bridge->flags & ICE_ESWITCH_BR_VLAN_FILTERING))
+ return;
+
+ ice_eswitch_br_fdb_flush(bridge);
+ if (enable)
+ bridge->flags |= ICE_ESWITCH_BR_VLAN_FILTERING;
+ else
+ bridge->flags &= ~ICE_ESWITCH_BR_VLAN_FILTERING;
+}
+
+static void
+ice_eswitch_br_clear_pvid(struct ice_esw_br_port *port)
+{
+ struct ice_vlan port_vlan = ICE_VLAN(ETH_P_8021Q, port->pvid, 0);
+ struct ice_vsi_vlan_ops *vlan_ops;
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(port->vsi);
+
+ vlan_ops->del_vlan(port->vsi, &port_vlan);
+ vlan_ops->clear_port_vlan(port->vsi);
+
+ ice_vf_vsi_disable_port_vlan(port->vsi);
+
+ port->pvid = 0;
+}
+
+static void
+ice_eswitch_br_vlan_cleanup(struct ice_esw_br_port *port,
+ struct ice_esw_br_vlan *vlan)
+{
+ struct ice_esw_br_fdb_entry *fdb_entry, *tmp;
+ struct ice_esw_br *bridge = port->bridge;
+
+ trace_ice_eswitch_br_vlan_cleanup(vlan);
+
+ list_for_each_entry_safe(fdb_entry, tmp, &bridge->fdb_list, list) {
+ if (vlan->vid == fdb_entry->data.vid)
+ ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry);
+ }
+
+ xa_erase(&port->vlans, vlan->vid);
+ if (port->pvid == vlan->vid)
+ ice_eswitch_br_clear_pvid(port);
+ kfree(vlan);
+}
+
+static void ice_eswitch_br_port_vlans_flush(struct ice_esw_br_port *port)
+{
+ struct ice_esw_br_vlan *vlan;
+ unsigned long index;
+
+ xa_for_each(&port->vlans, index, vlan)
+ ice_eswitch_br_vlan_cleanup(port, vlan);
+}
+
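+/* Offload a PVID (pvid + untagged) as a HW port VLAN: the tag is pushed
+ * on egress and stripped on ingress. Port VLAN and trunk VLANs are
+ * mutually exclusive, so existing trunk entries are flushed.
+ */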
+static int
+ice_eswitch_br_set_pvid(struct ice_esw_br_port *port,
+ struct ice_esw_br_vlan *vlan)
+{
+ struct ice_vlan port_vlan = ICE_VLAN(ETH_P_8021Q, vlan->vid, 0);
+ struct device *dev = ice_pf_to_dev(port->vsi->back);
+ struct ice_vsi_vlan_ops *vlan_ops;
+ int err;
+
+ if (port->pvid == vlan->vid || vlan->vid == 1)
+ return 0;
+
+ /* Setting port vlan on uplink isn't supported by hw */
+ if (port->type == ICE_ESWITCH_BR_UPLINK_PORT)
+ return -EOPNOTSUPP;
+
+ if (port->pvid) {
+ dev_info(dev,
+ "Port VLAN (vsi=%u, vid=%u) already exists on the port, remove it before adding new one\n",
+ port->vsi_idx, port->pvid);
+ return -EEXIST;
+ }
+
+ ice_vf_vsi_enable_port_vlan(port->vsi);
+
+ vlan_ops = ice_get_compat_vsi_vlan_ops(port->vsi);
+ err = vlan_ops->set_port_vlan(port->vsi, &port_vlan);
+ if (err)
+ return err;
+
+ err = vlan_ops->add_vlan(port->vsi, &port_vlan);
+ if (err)
+ return err;
+
+ ice_eswitch_br_port_vlans_flush(port);
+ port->pvid = vlan->vid;
+
+ return 0;
+}
+
+static struct ice_esw_br_vlan *
+ice_eswitch_br_vlan_create(u16 vid, u16 flags, struct ice_esw_br_port *port)
+{
+ struct device *dev = ice_pf_to_dev(port->vsi->back);
+ struct ice_esw_br_vlan *vlan;
+ int err;
+
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
+ return ERR_PTR(-ENOMEM);
+
+ vlan->vid = vid;
+ vlan->flags = flags;
+ if ((flags & BRIDGE_VLAN_INFO_PVID) &&
+ (flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
+ err = ice_eswitch_br_set_pvid(port, vlan);
+ if (err)
+ goto err_set_pvid;
+ } else if ((flags & BRIDGE_VLAN_INFO_PVID) ||
+ (flags & BRIDGE_VLAN_INFO_UNTAGGED)) {
+ dev_info(dev, "VLAN push and pop are supported only simultaneously\n");
+ err = -EOPNOTSUPP;
+ goto err_set_pvid;
+ }
+
+ err = xa_insert(&port->vlans, vlan->vid, vlan, GFP_KERNEL);
+ if (err)
+ goto err_insert;
+
+ trace_ice_eswitch_br_vlan_create(vlan);
+
+ return vlan;
+
+err_insert:
+ if (port->pvid)
+ ice_eswitch_br_clear_pvid(port);
+err_set_pvid:
+ kfree(vlan);
+ return ERR_PTR(err);
+}
+
+static int
+ice_eswitch_br_port_vlan_add(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid,
+ u16 flags, struct netlink_ext_ack *extack)
+{
+ struct ice_esw_br_port *port;
+ struct ice_esw_br_vlan *vlan;
+
+ port = xa_load(&bridge->ports, vsi_idx);
+ if (!port)
+ return -EINVAL;
+
+ if (port->pvid) {
+ dev_info(ice_pf_to_dev(port->vsi->back),
+ "Port VLAN (vsi=%u, vid=%d) exists on the port, remove it to add trunk VLANs\n",
+ port->vsi_idx, port->pvid);
+ return -EEXIST;
+ }
+
+ vlan = xa_load(&port->vlans, vid);
+ if (vlan) {
+ if (vlan->flags == flags)
+ return 0;
+
+ ice_eswitch_br_vlan_cleanup(port, vlan);
+ }
+
+ vlan = ice_eswitch_br_vlan_create(vid, flags, port);
+ if (IS_ERR(vlan)) {
+ NL_SET_ERR_MSG_FMT_MOD(extack, "Failed to create VLAN entry, vid: %u, vsi: %u",
+ vid, vsi_idx);
+ return PTR_ERR(vlan);
+ }
+
+ return 0;
+}
+
+static void
+ice_eswitch_br_port_vlan_del(struct ice_esw_br *bridge, u16 vsi_idx, u16 vid)
+{
+ struct ice_esw_br_port *port;
+ struct ice_esw_br_vlan *vlan;
+
+ port = xa_load(&bridge->ports, vsi_idx);
+ if (!port)
+ return;
+
+ vlan = xa_load(&port->vlans, vid);
+ if (!vlan)
+ return;
+
+ ice_eswitch_br_vlan_cleanup(port, vlan);
+}
+
+static int
+ice_eswitch_br_port_obj_add(struct net_device *netdev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
+ struct switchdev_obj_port_vlan *vlan;
+ int err;
+
+ if (!br_port)
+ return -EINVAL;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ err = ice_eswitch_br_port_vlan_add(br_port->bridge,
+ br_port->vsi_idx, vlan->vid,
+ vlan->flags, extack);
+ return err;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+ice_eswitch_br_port_obj_del(struct net_device *netdev, const void *ctx,
+ const struct switchdev_obj *obj)
+{
+ struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
+ struct switchdev_obj_port_vlan *vlan;
+
+ if (!br_port)
+ return -EINVAL;
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ ice_eswitch_br_port_vlan_del(br_port->bridge, br_port->vsi_idx,
+ vlan->vid);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+ice_eswitch_br_port_obj_attr_set(struct net_device *netdev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(netdev);
+
+ if (!br_port)
+ return -EINVAL;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
+ ice_eswitch_br_vlan_filtering_set(br_port->bridge,
+ attr->u.vlan_filtering);
+ return 0;
+ case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
+ br_port->bridge->ageing_time =
+ clock_t_to_jiffies(attr->u.ageing_time);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+ice_eswitch_br_event_blocking(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ ice_eswitch_br_is_dev_valid,
+ ice_eswitch_br_port_obj_add);
+ break;
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ ice_eswitch_br_is_dev_valid,
+ ice_eswitch_br_port_obj_del);
+ break;
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ ice_eswitch_br_is_dev_valid,
+ ice_eswitch_br_port_obj_attr_set);
+ break;
+ default:
+ err = 0;
+ }
+
+ return notifier_from_errno(err);
+}
+
+static void
+ice_eswitch_br_port_deinit(struct ice_esw_br *bridge,
+ struct ice_esw_br_port *br_port)
+{
+ struct ice_esw_br_fdb_entry *fdb_entry, *tmp;
+ struct ice_vsi *vsi = br_port->vsi;
+
+ list_for_each_entry_safe(fdb_entry, tmp, &bridge->fdb_list, list) {
+ if (br_port == fdb_entry->br_port)
+ ice_eswitch_br_fdb_entry_delete(bridge, fdb_entry);
+ }
+
+ if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back)
+ vsi->back->br_port = NULL;
+ else if (vsi->vf && vsi->vf->repr)
+ vsi->vf->repr->br_port = NULL;
+
+ xa_erase(&bridge->ports, br_port->vsi_idx);
+ ice_eswitch_br_port_vlans_flush(br_port);
+ kfree(br_port);
+}
+
+static struct ice_esw_br_port *
+ice_eswitch_br_port_init(struct ice_esw_br *bridge)
+{
+ struct ice_esw_br_port *br_port;
+
+ br_port = kzalloc(sizeof(*br_port), GFP_KERNEL);
+ if (!br_port)
+ return ERR_PTR(-ENOMEM);
+
+ xa_init(&br_port->vlans);
+
+ br_port->bridge = bridge;
+
+ return br_port;
+}
+
+static int
+ice_eswitch_br_vf_repr_port_init(struct ice_esw_br *bridge,
+ struct ice_repr *repr)
+{
+ struct ice_esw_br_port *br_port;
+ int err;
+
+ br_port = ice_eswitch_br_port_init(bridge);
+ if (IS_ERR(br_port))
+ return PTR_ERR(br_port);
+
+ br_port->vsi = repr->src_vsi;
+ br_port->vsi_idx = br_port->vsi->idx;
+ br_port->type = ICE_ESWITCH_BR_VF_REPR_PORT;
+ repr->br_port = br_port;
+
+ err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL);
+ if (err) {
+ ice_eswitch_br_port_deinit(bridge, br_port);
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+ice_eswitch_br_uplink_port_init(struct ice_esw_br *bridge, struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = pf->switchdev.uplink_vsi;
+ struct ice_esw_br_port *br_port;
+ int err;
+
+ br_port = ice_eswitch_br_port_init(bridge);
+ if (IS_ERR(br_port))
+ return PTR_ERR(br_port);
+
+ br_port->vsi = vsi;
+ br_port->vsi_idx = br_port->vsi->idx;
+ br_port->type = ICE_ESWITCH_BR_UPLINK_PORT;
+ pf->br_port = br_port;
+
+ err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL);
+ if (err) {
+ ice_eswitch_br_port_deinit(bridge, br_port);
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+ice_eswitch_br_ports_flush(struct ice_esw_br *bridge)
+{
+ struct ice_esw_br_port *port;
+ unsigned long i;
+
+ xa_for_each(&bridge->ports, i, port)
+ ice_eswitch_br_port_deinit(bridge, port);
+}
+
+static void
+ice_eswitch_br_deinit(struct ice_esw_br_offloads *br_offloads,
+ struct ice_esw_br *bridge)
+{
+ if (!bridge)
+ return;
+
+ /* Clean up all the ports that were added asynchronously
+ * through the NETDEV_CHANGEUPPER event.
+ */
+ ice_eswitch_br_ports_flush(bridge);
+ WARN_ON(!xa_empty(&bridge->ports));
+ xa_destroy(&bridge->ports);
+ rhashtable_destroy(&bridge->fdb_ht);
+
+ br_offloads->bridge = NULL;
+ kfree(bridge);
+}
+
+static struct ice_esw_br *
+ice_eswitch_br_init(struct ice_esw_br_offloads *br_offloads, int ifindex)
+{
+ struct ice_esw_br *bridge;
+ int err;
+
+ bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return ERR_PTR(-ENOMEM);
+
+ err = rhashtable_init(&bridge->fdb_ht, &ice_fdb_ht_params);
+ if (err) {
+ kfree(bridge);
+ return ERR_PTR(err);
+ }
+
+ INIT_LIST_HEAD(&bridge->fdb_list);
+ bridge->br_offloads = br_offloads;
+ bridge->ifindex = ifindex;
+ bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
+ xa_init(&bridge->ports);
+ br_offloads->bridge = bridge;
+
+ return bridge;
+}
+
+static struct ice_esw_br *
+ice_eswitch_br_get(struct ice_esw_br_offloads *br_offloads, int ifindex,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_esw_br *bridge = br_offloads->bridge;
+
+ if (bridge) {
+ if (bridge->ifindex != ifindex) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Only one bridge is supported per eswitch");
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+ return bridge;
+ }
+
+ /* Create the bridge if it doesn't exist yet */
+ bridge = ice_eswitch_br_init(br_offloads, ifindex);
+ if (IS_ERR(bridge))
+ NL_SET_ERR_MSG_MOD(extack, "Failed to init the bridge");
+
+ return bridge;
+}
+
+static void
+ice_eswitch_br_verify_deinit(struct ice_esw_br_offloads *br_offloads,
+ struct ice_esw_br *bridge)
+{
+ /* Remove the bridge if it exists and there are no ports left */
+ if (!bridge || !xa_empty(&bridge->ports))
+ return;
+
+ ice_eswitch_br_deinit(br_offloads, bridge);
+}
+
+static int
+ice_eswitch_br_port_unlink(struct ice_esw_br_offloads *br_offloads,
+ struct net_device *dev, int ifindex,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_esw_br_port *br_port = ice_eswitch_br_netdev_to_port(dev);
+ struct ice_esw_br *bridge;
+
+ if (!br_port) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port representor is not attached to any bridge");
+ return -EINVAL;
+ }
+
+ if (br_port->bridge->ifindex != ifindex) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port representor is attached to another bridge");
+ return -EINVAL;
+ }
+
+ bridge = br_port->bridge;
+
+ trace_ice_eswitch_br_port_unlink(br_port);
+ ice_eswitch_br_port_deinit(br_port->bridge, br_port);
+ ice_eswitch_br_verify_deinit(br_offloads, bridge);
+
+ return 0;
+}
+
+static int
+ice_eswitch_br_port_link(struct ice_esw_br_offloads *br_offloads,
+ struct net_device *dev, int ifindex,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_esw_br *bridge;
+ int err;
+
+ if (ice_eswitch_br_netdev_to_port(dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port is already attached to the bridge");
+ return -EINVAL;
+ }
+
+ bridge = ice_eswitch_br_get(br_offloads, ifindex, extack);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+
+ if (ice_is_port_repr_netdev(dev)) {
+ struct ice_repr *repr = ice_netdev_to_repr(dev);
+
+ err = ice_eswitch_br_vf_repr_port_init(bridge, repr);
+ trace_ice_eswitch_br_port_link(repr->br_port);
+ } else {
+ struct net_device *ice_dev;
+ struct ice_pf *pf;
+
+ if (netif_is_lag_master(dev))
+ ice_dev = ice_eswitch_br_get_uplink_from_lag(dev);
+ else
+ ice_dev = dev;
+
+ if (!ice_dev)
+ return 0;
+
+ pf = ice_netdev_to_pf(ice_dev);
+
+ err = ice_eswitch_br_uplink_port_init(bridge, pf);
+ trace_ice_eswitch_br_port_link(pf->br_port);
+ }
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to init bridge port");
+ goto err_port_init;
+ }
+
+ return 0;
+
+err_port_init:
+ ice_eswitch_br_verify_deinit(br_offloads, bridge);
+ return err;
+}
+
+static int
+ice_eswitch_br_port_changeupper(struct notifier_block *nb, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct ice_esw_br_offloads *br_offloads;
+ struct netlink_ext_ack *extack;
+ struct net_device *upper;
+
+ br_offloads = ice_nb_to_br_offloads(nb, netdev_nb);
+
+ if (!ice_eswitch_br_is_dev_valid(dev))
+ return 0;
+
+ upper = info->upper_dev;
+ if (!netif_is_bridge_master(upper))
+ return 0;
+
+ extack = netdev_notifier_info_to_extack(&info->info);
+
+ if (info->linking)
+ return ice_eswitch_br_port_link(br_offloads, dev,
+ upper->ifindex, extack);
+ else
+ return ice_eswitch_br_port_unlink(br_offloads, dev,
+ upper->ifindex, extack);
+}
+
+static int
+ice_eswitch_br_port_event(struct notifier_block *nb,
+ unsigned long event, void *ptr)
+{
+ int err = 0;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ err = ice_eswitch_br_port_changeupper(nb, ptr);
+ break;
+ }
+
+ return notifier_from_errno(err);
+}
+
+static void
+ice_eswitch_br_offloads_dealloc(struct ice_pf *pf)
+{
+ struct ice_esw_br_offloads *br_offloads = pf->switchdev.br_offloads;
+
+ ASSERT_RTNL();
+
+ if (!br_offloads)
+ return;
+
+ ice_eswitch_br_deinit(br_offloads, br_offloads->bridge);
+
+ pf->switchdev.br_offloads = NULL;
+ kfree(br_offloads);
+}
+
+static struct ice_esw_br_offloads *
+ice_eswitch_br_offloads_alloc(struct ice_pf *pf)
+{
+ struct ice_esw_br_offloads *br_offloads;
+
+ ASSERT_RTNL();
+
+ if (pf->switchdev.br_offloads)
+ return ERR_PTR(-EEXIST);
+
+ br_offloads = kzalloc(sizeof(*br_offloads), GFP_KERNEL);
+ if (!br_offloads)
+ return ERR_PTR(-ENOMEM);
+
+ pf->switchdev.br_offloads = br_offloads;
+ br_offloads->pf = pf;
+
+ return br_offloads;
+}
+
+void
+ice_eswitch_br_offloads_deinit(struct ice_pf *pf)
+{
+ struct ice_esw_br_offloads *br_offloads;
+
+ br_offloads = pf->switchdev.br_offloads;
+ if (!br_offloads)
+ return;
+
+ cancel_delayed_work_sync(&br_offloads->update_work);
+ unregister_netdevice_notifier(&br_offloads->netdev_nb);
+ unregister_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
+ unregister_switchdev_notifier(&br_offloads->switchdev_nb);
+ destroy_workqueue(br_offloads->wq);
+ /* Although the notifier blocks were just unregistered, so no new
+ * events will arrive, some events might already be in progress.
+ * Hold the rtnl lock and wait for them to finish.
+ */
+ rtnl_lock();
+ ice_eswitch_br_offloads_dealloc(pf);
+ rtnl_unlock();
+}
+
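+/* Age out dynamic FDB entries: entries not refreshed within the bridge
+ * ageing time are removed; user-added (static) entries are kept.
+ */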
+static void ice_eswitch_br_update(struct ice_esw_br_offloads *br_offloads)
+{
+ struct ice_esw_br *bridge = br_offloads->bridge;
+ struct ice_esw_br_fdb_entry *entry, *tmp;
+
+ if (!bridge)
+ return;
+
+ rtnl_lock();
+ list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
+ if (entry->flags & ICE_ESWITCH_BR_FDB_ADDED_BY_USER)
+ continue;
+
+ if (time_is_after_eq_jiffies(entry->last_use +
+ bridge->ageing_time))
+ continue;
+
+ ice_eswitch_br_fdb_entry_notify_and_cleanup(bridge, entry);
+ }
+ rtnl_unlock();
+}
+
+static void ice_eswitch_br_update_work(struct work_struct *work)
+{
+ struct ice_esw_br_offloads *br_offloads;
+
+ br_offloads = ice_work_to_br_offloads(work);
+
+ ice_eswitch_br_update(br_offloads);
+
+ queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
+ ICE_ESW_BRIDGE_UPDATE_INTERVAL);
+}
+
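+/* Bridge offload bootstrap: allocate the offload context, register the
+ * switchdev notifier (FDB add/del), the blocking switchdev notifier
+ * (port objects and attributes) and the netdevice notifier (CHANGEUPPER
+ * port link/unlink), then kick off the periodic FDB aging work.
+ */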
+int
+ice_eswitch_br_offloads_init(struct ice_pf *pf)
+{
+ struct ice_esw_br_offloads *br_offloads;
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ rtnl_lock();
+ br_offloads = ice_eswitch_br_offloads_alloc(pf);
+ rtnl_unlock();
+ if (IS_ERR(br_offloads)) {
+ dev_err(dev, "Failed to init eswitch bridge\n");
+ return PTR_ERR(br_offloads);
+ }
+
+ br_offloads->wq = alloc_ordered_workqueue("ice_bridge_wq", 0);
+ if (!br_offloads->wq) {
+ err = -ENOMEM;
+ dev_err(dev, "Failed to allocate bridge workqueue\n");
+ goto err_alloc_wq;
+ }
+
+ br_offloads->switchdev_nb.notifier_call =
+ ice_eswitch_br_switchdev_event;
+ err = register_switchdev_notifier(&br_offloads->switchdev_nb);
+ if (err) {
+ dev_err(dev,
+ "Failed to register switchdev notifier\n");
+ goto err_reg_switchdev_nb;
+ }
+
+ br_offloads->switchdev_blk.notifier_call =
+ ice_eswitch_br_event_blocking;
+ err = register_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
+ if (err) {
+ dev_err(dev,
+ "Failed to register bridge blocking switchdev notifier\n");
+ goto err_reg_switchdev_blk;
+ }
+
+ br_offloads->netdev_nb.notifier_call = ice_eswitch_br_port_event;
+ err = register_netdevice_notifier(&br_offloads->netdev_nb);
+ if (err) {
+ dev_err(dev,
+ "Failed to register bridge port event notifier\n");
+ goto err_reg_netdev_nb;
+ }
+
+ INIT_DELAYED_WORK(&br_offloads->update_work,
+ ice_eswitch_br_update_work);
+ queue_delayed_work(br_offloads->wq, &br_offloads->update_work,
+ ICE_ESW_BRIDGE_UPDATE_INTERVAL);
+
+ return 0;
+
+err_reg_netdev_nb:
+ unregister_switchdev_blocking_notifier(&br_offloads->switchdev_blk);
+err_reg_switchdev_blk:
+ unregister_switchdev_notifier(&br_offloads->switchdev_nb);
+err_reg_switchdev_nb:
+ destroy_workqueue(br_offloads->wq);
+err_alloc_wq:
+ rtnl_lock();
+ ice_eswitch_br_offloads_dealloc(pf);
+ rtnl_unlock();
+
+ return err;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.h b/drivers/net/ethernet/intel/ice/ice_eswitch_br.h
new file mode 100644
index 000000000000..85a8fadb2928
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2023, Intel Corporation. */
+
+#ifndef _ICE_ESWITCH_BR_H_
+#define _ICE_ESWITCH_BR_H_
+
+#include <linux/rhashtable.h>
+#include <linux/workqueue.h>
+
+struct ice_esw_br_fdb_data {
+ unsigned char addr[ETH_ALEN];
+ u16 vid;
+};
+
+struct ice_esw_br_flow {
+ struct ice_rule_query_data *fwd_rule;
+ struct ice_rule_query_data *guard_rule;
+};
+
+enum {
+ ICE_ESWITCH_BR_FDB_ADDED_BY_USER = BIT(0),
+};
+
+struct ice_esw_br_fdb_entry {
+ struct ice_esw_br_fdb_data data;
+ struct rhash_head ht_node;
+ struct list_head list;
+
+ int flags;
+
+ struct net_device *dev;
+ struct ice_esw_br_port *br_port;
+ struct ice_esw_br_flow *flow;
+
+ unsigned long last_use;
+};
+
+enum ice_esw_br_port_type {
+ ICE_ESWITCH_BR_UPLINK_PORT = 0,
+ ICE_ESWITCH_BR_VF_REPR_PORT = 1,
+};
+
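+/* A bridge port wraps either the uplink VSI or a VF representor's source
+ * VSI; pvid is the offloaded port VLAN ID (0 when none is set).
+ */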
+struct ice_esw_br_port {
+ struct ice_esw_br *bridge;
+ struct ice_vsi *vsi;
+ enum ice_esw_br_port_type type;
+ u16 vsi_idx;
+ u16 pvid;
+ struct xarray vlans;
+};
+
+enum {
+ ICE_ESWITCH_BR_VLAN_FILTERING = BIT(0),
+};
+
+struct ice_esw_br {
+ struct ice_esw_br_offloads *br_offloads;
+ struct xarray ports;
+
+ struct rhashtable fdb_ht;
+ struct list_head fdb_list;
+
+ int ifindex;
+ u32 flags;
+ unsigned long ageing_time;
+};
+
+struct ice_esw_br_offloads {
+ struct ice_pf *pf;
+ struct ice_esw_br *bridge;
+ struct notifier_block netdev_nb;
+ struct notifier_block switchdev_blk;
+ struct notifier_block switchdev_nb;
+
+ struct workqueue_struct *wq;
+ struct delayed_work update_work;
+};
+
+struct ice_esw_br_fdb_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct net_device *dev;
+ unsigned long event;
+};
+
+struct ice_esw_br_vlan {
+ u16 vid;
+ u16 flags;
+};
+
+#define ice_nb_to_br_offloads(nb, nb_name) \
+ container_of(nb, \
+ struct ice_esw_br_offloads, \
+ nb_name)
+
+#define ice_work_to_br_offloads(w) \
+ container_of(w, \
+ struct ice_esw_br_offloads, \
+ update_work.work)
+
+#define ice_work_to_fdb_work(w) \
+ container_of(w, \
+ struct ice_esw_br_fdb_work, \
+ work)
+
+static inline bool ice_eswitch_br_is_vid_valid(u16 vid)
+{
+ /* In trunk VLAN mode, for untagged traffic the bridge sends requests
+ * to offload VLAN 1 with pvid and untagged flags set. Since these
+ * flags are not supported, add a MAC filter instead.
+ */
+ return vid > 1;
+}
+
+void
+ice_eswitch_br_offloads_deinit(struct ice_pf *pf);
+int
+ice_eswitch_br_offloads_init(struct ice_pf *pf);
+
+#endif /* _ICE_ESWITCH_BR_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index ad4d4702129f..bde9bc74f928 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -345,6 +345,88 @@ static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
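+/* Per-speed tables of advertised link modes, wrapped into
+ * ethtool_forced_speed_map entries below so the advertised speed lookup
+ * in ice_ksettings_find_adv_link_speed() can be table driven.
+ */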
+static const u32 ice_adv_lnk_speed_100[] __initconst = {
+ ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+};
+
+static const u32 ice_adv_lnk_speed_1000[] __initconst = {
+ ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+};
+
+static const u32 ice_adv_lnk_speed_2500[] __initconst = {
+ ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_2500baseX_Full_BIT,
+};
+
+static const u32 ice_adv_lnk_speed_5000[] __initconst = {
+ ETHTOOL_LINK_MODE_5000baseT_Full_BIT,
+};
+
+static const u32 ice_adv_lnk_speed_10000[] __initconst = {
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+};
+
+static const u32 ice_adv_lnk_speed_25000[] __initconst = {
+ ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+};
+
+static const u32 ice_adv_lnk_speed_40000[] __initconst = {
+ ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+};
+
+static const u32 ice_adv_lnk_speed_50000[] __initconst = {
+ ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+};
+
+static const u32 ice_adv_lnk_speed_100000[] __initconst = {
+ ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
+ ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
+};
+
+static const u32 ice_adv_lnk_speed_200000[] __initconst = {
+ ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseDR4_Full_BIT,
+ ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
+};
+
+static struct ethtool_forced_speed_map ice_adv_lnk_speed_maps[] __ro_after_init = {
+ ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 100),
+ ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 1000),
+ ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 2500),
+ ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 5000),
+ ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 10000),
+ ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 25000),
+ ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 40000),
+ ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 50000),
+ ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 100000),
+ ETHTOOL_FORCED_SPEED_MAP(ice_adv_lnk_speed, 200000),
+};
+
+void __init ice_adv_lnk_speed_maps_init(void)
+{
+ ethtool_forced_speed_maps_init(ice_adv_lnk_speed_maps,
+ ARRAY_SIZE(ice_adv_lnk_speed_maps));
+}
+
static void
__ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo,
struct ice_vsi *vsi)
@@ -1060,7 +1142,7 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data,
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ICE_VSI_STATS_LEN; i++)
- ethtool_sprintf(&p,
+ ethtool_sprintf(&p, "%s",
ice_gstrings_vsi_stats[i].stat_string);
if (ice_is_port_repr_netdev(netdev))
@@ -1080,7 +1162,7 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data,
return;
for (i = 0; i < ICE_PF_STATS_LEN; i++)
- ethtool_sprintf(&p,
+ ethtool_sprintf(&p, "%s",
ice_gstrings_pf_stats[i].stat_string);
for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
@@ -1097,7 +1179,8 @@ __ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data,
break;
case ETH_SS_PRIV_FLAGS:
for (i = 0; i < ICE_PRIV_FLAG_ARRAY_SIZE; i++)
- ethtool_sprintf(&p, ice_gstrings_priv_flags[i].name);
+ ethtool_sprintf(&p, "%s",
+ ice_gstrings_priv_flags[i].name);
break;
default:
break;
@@ -1638,6 +1721,15 @@ ice_get_ethtool_stats(struct net_device *netdev,
ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC | \
ICE_PHY_TYPE_HIGH_100G_AUI2)
+#define ICE_PHY_TYPE_HIGH_MASK_200G (ICE_PHY_TYPE_HIGH_200G_CR4_PAM4 | \
+ ICE_PHY_TYPE_HIGH_200G_SR4 | \
+ ICE_PHY_TYPE_HIGH_200G_FR4 | \
+ ICE_PHY_TYPE_HIGH_200G_LR4 | \
+ ICE_PHY_TYPE_HIGH_200G_DR4 | \
+ ICE_PHY_TYPE_HIGH_200G_KR4_PAM4 | \
+ ICE_PHY_TYPE_HIGH_200G_AUI4_AOC_ACC | \
+ ICE_PHY_TYPE_HIGH_200G_AUI4)
+
/**
* ice_mask_min_supported_speeds
* @hw: pointer to the HW structure
@@ -1652,8 +1744,9 @@ ice_mask_min_supported_speeds(struct ice_hw *hw,
u64 phy_types_high, u64 *phy_types_low)
{
/* if QSFP connection with 100G speed, minimum supported speed is 25G */
- if (*phy_types_low & ICE_PHY_TYPE_LOW_MASK_100G ||
- phy_types_high & ICE_PHY_TYPE_HIGH_MASK_100G)
+ if ((*phy_types_low & ICE_PHY_TYPE_LOW_MASK_100G) ||
+ (phy_types_high & ICE_PHY_TYPE_HIGH_MASK_100G) ||
+ (phy_types_high & ICE_PHY_TYPE_HIGH_MASK_200G))
*phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_25G;
else if (!ice_is_100m_speed_supported(hw))
*phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_1G;
@@ -1757,14 +1850,14 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
linkmode_zero(ks->link_modes.supported);
linkmode_zero(ks->link_modes.advertising);
- for (i = 0; i < BITS_PER_TYPE(u64); i++) {
+ for (i = 0; i < ARRAY_SIZE(phy_type_low_lkup); i++) {
if (phy_types_low & BIT_ULL(i))
ice_linkmode_set_bit(&phy_type_low_lkup[i], ks,
req_speeds, advert_phy_type_lo,
i);
}
- for (i = 0; i < BITS_PER_TYPE(u64); i++) {
+ for (i = 0; i < ARRAY_SIZE(phy_type_high_lkup); i++) {
if (phy_types_high & BIT_ULL(i))
ice_linkmode_set_bit(&phy_type_high_lkup[i], ks,
req_speeds, advert_phy_type_hi,
@@ -1796,6 +1889,9 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
ice_phy_type_to_ethtool(netdev, ks);
switch (link_info->link_speed) {
+ case ICE_AQ_LINK_SPEED_200GB:
+ ks->base.speed = SPEED_200000;
+ break;
case ICE_AQ_LINK_SPEED_100GB:
ks->base.speed = SPEED_100000;
break;
@@ -2008,79 +2104,69 @@ done:
}
/**
+ * ice_speed_to_aq_link - Get AQ link speed for an ethtool forced speed
+ * @speed: ethtool forced speed
+ *
+ * Return: the matching ICE_AQ_LINK_SPEED_* value, or
+ * ICE_AQ_LINK_SPEED_UNKNOWN
+ */
+static u16 ice_speed_to_aq_link(int speed)
+{
+ int aq_speed;
+
+ switch (speed) {
+ case SPEED_10:
+ aq_speed = ICE_AQ_LINK_SPEED_10MB;
+ break;
+ case SPEED_100:
+ aq_speed = ICE_AQ_LINK_SPEED_100MB;
+ break;
+ case SPEED_1000:
+ aq_speed = ICE_AQ_LINK_SPEED_1000MB;
+ break;
+ case SPEED_2500:
+ aq_speed = ICE_AQ_LINK_SPEED_2500MB;
+ break;
+ case SPEED_5000:
+ aq_speed = ICE_AQ_LINK_SPEED_5GB;
+ break;
+ case SPEED_10000:
+ aq_speed = ICE_AQ_LINK_SPEED_10GB;
+ break;
+ case SPEED_20000:
+ aq_speed = ICE_AQ_LINK_SPEED_20GB;
+ break;
+ case SPEED_25000:
+ aq_speed = ICE_AQ_LINK_SPEED_25GB;
+ break;
+ case SPEED_40000:
+ aq_speed = ICE_AQ_LINK_SPEED_40GB;
+ break;
+ case SPEED_50000:
+ aq_speed = ICE_AQ_LINK_SPEED_50GB;
+ break;
+ case SPEED_100000:
+ aq_speed = ICE_AQ_LINK_SPEED_100GB;
+ break;
+ case SPEED_200000:
+ aq_speed = ICE_AQ_LINK_SPEED_200GB;
+ break;
+ default:
+ aq_speed = ICE_AQ_LINK_SPEED_UNKNOWN;
+ break;
+ }
+ return aq_speed;
+}
+
+/**
* ice_ksettings_find_adv_link_speed - Find advertising link speed
* @ks: ethtool ksettings
*/
static u16
ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)
{
+ const struct ethtool_forced_speed_map *map;
u16 adv_link_speed = 0;
- if (ethtool_link_ksettings_test_link_mode(ks, advertising,
- 100baseT_Full))
- adv_link_speed |= ICE_AQ_LINK_SPEED_100MB;
- if (ethtool_link_ksettings_test_link_mode(ks, advertising,
- 1000baseX_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 1000baseT_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 1000baseKX_Full))
- adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB;
- if (ethtool_link_ksettings_test_link_mode(ks, advertising,
- 2500baseT_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 2500baseX_Full))
- adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB;
- if (ethtool_link_ksettings_test_link_mode(ks, advertising,
- 5000baseT_Full))
- adv_link_speed |= ICE_AQ_LINK_SPEED_5GB;
- if (ethtool_link_ksettings_test_link_mode(ks, advertising,
- 10000baseT_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 10000baseKR_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 10000baseSR_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 10000baseLR_Full))
- adv_link_speed |= ICE_AQ_LINK_SPEED_10GB;
- if (ethtool_link_ksettings_test_link_mode(ks, advertising,
- 25000baseCR_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 25000baseSR_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 25000baseKR_Full))
- adv_link_speed |= ICE_AQ_LINK_SPEED_25GB;
- if (ethtool_link_ksettings_test_link_mode(ks, advertising,
- 40000baseCR4_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 40000baseSR4_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 40000baseLR4_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 40000baseKR4_Full))
- adv_link_speed |= ICE_AQ_LINK_SPEED_40GB;
- if (ethtool_link_ksettings_test_link_mode(ks, advertising,
- 50000baseCR2_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 50000baseKR2_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 50000baseSR2_Full))
- adv_link_speed |= ICE_AQ_LINK_SPEED_50GB;
- if (ethtool_link_ksettings_test_link_mode(ks, advertising,
- 100000baseCR4_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 100000baseSR4_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 100000baseLR4_ER4_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 100000baseKR4_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 100000baseCR2_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 100000baseSR2_Full) ||
- ethtool_link_ksettings_test_link_mode(ks, advertising,
- 100000baseKR2_Full))
- adv_link_speed |= ICE_AQ_LINK_SPEED_100GB;
+ for (u32 i = 0; i < ARRAY_SIZE(ice_adv_lnk_speed_maps); i++) {
+ map = ice_adv_lnk_speed_maps + i;
+ if (linkmode_intersects(ks->link_modes.advertising, map->caps))
+ adv_link_speed |= ice_speed_to_aq_link(map->speed);
+ }
return adv_link_speed;
}
@@ -3285,7 +3371,7 @@ ice_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE;
- info->phc_index = ice_get_ptp_clock_index(pf);
+ info->phc_index = ice_ptp_clock_index(pf);
info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.h b/drivers/net/ethernet/intel/ice/ice_ethtool.h
index b403ee79cd5e..b88e3da06f13 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.h
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.h
@@ -100,6 +100,14 @@ phy_type_high_lkup[] = {
[2] = ICE_PHY_TYPE(100GB, 100000baseCR2_Full),
[3] = ICE_PHY_TYPE(100GB, 100000baseSR2_Full),
[4] = ICE_PHY_TYPE(100GB, 100000baseCR2_Full),
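+ /* 200G PHY types (bits 5-12); the AUI4 variants map to the
+ * closest defined link mode
+ */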
+ [5] = ICE_PHY_TYPE(200GB, 200000baseCR4_Full),
+ [6] = ICE_PHY_TYPE(200GB, 200000baseSR4_Full),
+ [7] = ICE_PHY_TYPE(200GB, 200000baseLR4_ER4_FR4_Full),
+ [8] = ICE_PHY_TYPE(200GB, 200000baseLR4_ER4_FR4_Full),
+ [9] = ICE_PHY_TYPE(200GB, 200000baseDR4_Full),
+ [10] = ICE_PHY_TYPE(200GB, 200000baseKR4_Full),
+ [11] = ICE_PHY_TYPE(200GB, 200000baseSR4_Full),
+ [12] = ICE_PHY_TYPE(200GB, 200000baseCR4_Full),
};
#endif /* !_ICE_ETHTOOL_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index 8c6e13f87b7d..d151e5bacfec 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2018-2020, Intel Corporation. */
+/* Copyright (C) 2018-2023, Intel Corporation. */
/* flow director ethtool support for ice */
@@ -540,16 +540,24 @@ static int ice_fdir_num_avail_fltr(struct ice_hw *hw, struct ice_vsi *vsi)
/* total guaranteed filters assigned to this VSI */
num_guar = vsi->num_gfltr;
- /* minus the guaranteed filters programed by this VSI */
- num_guar -= (rd32(hw, VSIQF_FD_CNT(vsi_num)) &
- VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S;
-
/* total global best effort filters */
num_be = hw->func_caps.fd_fltr_best_effort;
- /* minus the global best effort filters programmed */
- num_be -= (rd32(hw, GLQF_FD_CNT) & GLQF_FD_CNT_FD_BCNT_M) >>
- GLQF_FD_CNT_FD_BCNT_S;
+ /* Subtract the number of programmed filters from the global values */
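+ /* E830 widened the filter counter fields, so the masks differ
+ * by MAC type
+ */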
+ switch (hw->mac_type) {
+ case ICE_MAC_E830:
+ num_guar -= FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M,
+ rd32(hw, VSIQF_FD_CNT(vsi_num)));
+ num_be -= FIELD_GET(E830_GLQF_FD_CNT_FD_BCNT_M,
+ rd32(hw, GLQF_FD_CNT));
+ break;
+ case ICE_MAC_E810:
+ default:
+ num_guar -= FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M,
+ rd32(hw, VSIQF_FD_CNT(vsi_num)));
+ num_be -= FIELD_GET(E800_GLQF_FD_CNT_FD_BCNT_M,
+ rd32(hw, GLQF_FD_CNT));
+ }
return num_guar + num_be;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.c b/drivers/net/ethernet/intel/ice/ice_flow.c
index 85cca572c22a..fb8b925aaf8b 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.c
+++ b/drivers/net/ethernet/intel/ice/ice_flow.c
@@ -1318,7 +1318,6 @@ ice_flow_rem_entry_sync(struct ice_hw *hw, enum ice_block __always_unused blk,
list_del(&entry->l_entry);
- devm_kfree(ice_hw_to_dev(hw), entry->entry);
devm_kfree(ice_hw_to_dev(hw), entry);
return 0;
@@ -1645,10 +1644,8 @@ ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id,
*entry_h = ICE_FLOW_ENTRY_HNDL(e);
out:
- if (status && e) {
- devm_kfree(ice_hw_to_dev(hw), e->entry);
+ if (status)
devm_kfree(ice_hw_to_dev(hw), e);
- }
return status;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_flow.h b/drivers/net/ethernet/intel/ice/ice_flow.h
index b465d27d9b80..96923ef0a5a8 100644
--- a/drivers/net/ethernet/intel/ice/ice_flow.h
+++ b/drivers/net/ethernet/intel/ice/ice_flow.h
@@ -350,11 +350,8 @@ struct ice_flow_entry {
u64 id;
struct ice_flow_prof *prof;
- /* Flow entry's content */
- void *entry;
enum ice_flow_priority priority;
u16 vsi_handle;
- u16 entry_sz;
};
#define ICE_FLOW_ENTRY_HNDL(e) ((u64)(uintptr_t)e)
diff --git a/drivers/net/ethernet/intel/ice/ice_fw_update.c b/drivers/net/ethernet/intel/ice/ice_fw_update.c
index 3dc5662d62a6..319a2d6fe26c 100644
--- a/drivers/net/ethernet/intel/ice/ice_fw_update.c
+++ b/drivers/net/ethernet/intel/ice/ice_fw_update.c
@@ -293,16 +293,17 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
{
u16 completion_module, completion_retval;
struct device *dev = ice_pf_to_dev(pf);
- struct ice_rq_event_info event;
+ struct ice_aq_task task = {};
struct ice_hw *hw = &pf->hw;
+ struct ice_aq_desc *desc;
u32 completion_offset;
int err;
- memset(&event, 0, sizeof(event));
-
dev_dbg(dev, "Writing block of %u bytes for module 0x%02x at offset %u\n",
block_size, module, offset);
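+ /* Register for the nvm_write completion before issuing the command
+ * so the event cannot be missed between send and wait.
+ */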
+ ice_aq_prep_for_event(pf, &task, ice_aqc_opc_nvm_write);
+
err = ice_aq_update_nvm(hw, module, offset, block_size, block,
last_cmd, 0, NULL);
if (err) {
@@ -319,7 +320,7 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
* is conservative and is intended to prevent failure to update when
* firmware is slow to respond.
*/
- err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write, 15 * HZ, &event);
+ err = ice_aq_wait_for_event(pf, &task, 15 * HZ);
if (err) {
dev_err(dev, "Timed out while trying to flash module 0x%02x with block of size %u at offset %u, err %d\n",
module, block_size, offset, err);
@@ -327,11 +328,12 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
return -EIO;
}
- completion_module = le16_to_cpu(event.desc.params.nvm.module_typeid);
- completion_retval = le16_to_cpu(event.desc.retval);
+ desc = &task.event.desc;
+ completion_module = le16_to_cpu(desc->params.nvm.module_typeid);
+ completion_retval = le16_to_cpu(desc->retval);
- completion_offset = le16_to_cpu(event.desc.params.nvm.offset_low);
- completion_offset |= event.desc.params.nvm.offset_high << 16;
+ completion_offset = le16_to_cpu(desc->params.nvm.offset_low);
+ completion_offset |= desc->params.nvm.offset_high << 16;
if (completion_module != module) {
dev_err(dev, "Unexpected module_typeid in write completion: got 0x%x, expected 0x%x\n",
@@ -363,8 +365,8 @@ ice_write_one_nvm_block(struct ice_pf *pf, u16 module, u32 offset,
*/
if (reset_level && last_cmd && module == ICE_SR_1ST_NVM_BANK_PTR) {
if (hw->dev_caps.common_cap.pcie_reset_avoidance) {
- *reset_level = (event.desc.params.nvm.cmd_flags &
- ICE_AQC_NVM_RESET_LVL_M);
+ *reset_level = desc->params.nvm.cmd_flags &
+ ICE_AQC_NVM_RESET_LVL_M;
dev_dbg(dev, "Firmware reported required reset level as %u\n",
*reset_level);
} else {
@@ -479,19 +481,20 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
{
u16 completion_module, completion_retval;
struct device *dev = ice_pf_to_dev(pf);
- struct ice_rq_event_info event;
+ struct ice_aq_task task = {};
struct ice_hw *hw = &pf->hw;
+ struct ice_aq_desc *desc;
struct devlink *devlink;
int err;
dev_dbg(dev, "Beginning erase of flash component '%s', module 0x%02x\n", component, module);
- memset(&event, 0, sizeof(event));
-
devlink = priv_to_devlink(pf);
devlink_flash_update_timeout_notify(devlink, "Erasing", component, ICE_FW_ERASE_TIMEOUT);
+ ice_aq_prep_for_event(pf, &task, ice_aqc_opc_nvm_erase);
+
err = ice_aq_erase_nvm(hw, module, NULL);
if (err) {
dev_err(dev, "Failed to erase %s (module 0x%02x), err %d aq_err %s\n",
@@ -502,7 +505,7 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
goto out_notify_devlink;
}
- err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_erase, ICE_FW_ERASE_TIMEOUT * HZ, &event);
+ err = ice_aq_wait_for_event(pf, &task, ICE_FW_ERASE_TIMEOUT * HZ);
if (err) {
dev_err(dev, "Timed out waiting for firmware to respond with erase completion for %s (module 0x%02x), err %d\n",
component, module, err);
@@ -510,8 +513,9 @@ ice_erase_nvm_module(struct ice_pf *pf, u16 module, const char *component,
goto out_notify_devlink;
}
- completion_module = le16_to_cpu(event.desc.params.nvm.module_typeid);
- completion_retval = le16_to_cpu(event.desc.retval);
+ desc = &task.event.desc;
+ completion_module = le16_to_cpu(desc->params.nvm.module_typeid);
+ completion_retval = le16_to_cpu(desc->retval);
if (completion_module != module) {
dev_err(dev, "Unexpected module_typeid in erase completion for %s: got 0x%x, expected 0x%x\n",
@@ -560,13 +564,13 @@ ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
u8 *emp_reset_available, struct netlink_ext_ack *extack)
{
struct device *dev = ice_pf_to_dev(pf);
- struct ice_rq_event_info event;
+ struct ice_aq_task task = {};
struct ice_hw *hw = &pf->hw;
u16 completion_retval;
u8 response_flags;
int err;
- memset(&event, 0, sizeof(event));
+ ice_aq_prep_for_event(pf, &task, ice_aqc_opc_nvm_write_activate);
err = ice_nvm_write_activate(hw, activate_flags, &response_flags);
if (err) {
@@ -592,8 +596,7 @@ ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
}
}
- err = ice_aq_wait_for_event(pf, ice_aqc_opc_nvm_write_activate, 30 * HZ,
- &event);
+ err = ice_aq_wait_for_event(pf, &task, 30 * HZ);
if (err) {
dev_err(dev, "Timed out waiting for firmware to switch active flash banks, err %d\n",
err);
@@ -601,7 +604,7 @@ ice_switch_flash_banks(struct ice_pf *pf, u8 activate_flags,
return err;
}
- completion_retval = le16_to_cpu(event.desc.retval);
+ completion_retval = le16_to_cpu(task.event.desc.retval);
if (completion_retval) {
dev_err(dev, "Firmware failed to switch active flash banks aq_err %s\n",
ice_aq_str((enum ice_aq_err)completion_retval));
diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c
index 75c9de675f20..c8ea1af51ad3 100644
--- a/drivers/net/ethernet/intel/ice/ice_gnss.c
+++ b/drivers/net/ethernet/intel/ice/ice_gnss.c
@@ -389,6 +389,9 @@ bool ice_gnss_is_gps_present(struct ice_hw *hw)
if (!hw->func_caps.ts_func_info.src_tmr_owned)
return false;
+ if (!ice_is_gps_in_netlist(hw))
+ return false;
+
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
if (ice_is_e810t(hw)) {
int err;
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index a92dc9a16035..86936b758ade 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018, Intel Corporation. */
+/* Copyright (c) 2018-2023, Intel Corporation. */
/* Machine-generated file */
@@ -231,6 +231,7 @@
#define PFINT_SB_CTL 0x0016B600
#define PFINT_SB_CTL_MSIX_INDX_M ICE_M(0x7FF, 0)
#define PFINT_SB_CTL_CAUSE_ENA_M BIT(30)
+#define PFINT_TSYN_MSK 0x0016C980
#define QINT_RQCTL(_QRX) (0x00150000 + ((_QRX) * 4))
#define QINT_RQCTL_MSIX_INDX_S 0
#define QINT_RQCTL_MSIX_INDX_M ICE_M(0x7FF, 0)
@@ -284,11 +285,11 @@
#define VPLAN_TX_QBASE_VFNUMQ_M ICE_M(0xFF, 16)
#define VPLAN_TXQ_MAPENA(_VF) (0x00073800 + ((_VF) * 4))
#define VPLAN_TXQ_MAPENA_TX_ENA_M BIT(0)
-#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E36E0 + ((_i) * 32))
-#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
-#define PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M ICE_M(0xFFFF, 0)
-#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3800 + ((_i) * 32))
-#define PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M ICE_M(0xFFFF, 0)
+#define E800_PRTMAC_HSEC_CTL_TX_PS_QNT(_i) (0x001E36E0 + ((_i) * 32))
+#define E800_PRTMAC_HSEC_CTL_TX_PS_QNT_MAX 8
+#define E800_PRTMAC_HSEC_CTL_TX_PS_QNT_M GENMASK(15, 0)
+#define E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR(_i) (0x001E3800 + ((_i) * 32))
+#define E800_PRTMAC_HSEC_CTL_TX_PS_RFSH_TMR_M GENMASK(15, 0)
#define GL_MDCK_TX_TDPU 0x00049348
#define GL_MDCK_TX_TDPU_RCU_ANTISPOOF_ITR_DIS_M BIT(1)
#define GL_MDET_RX 0x00294C00
@@ -311,7 +312,11 @@
#define GL_MDET_TX_PQM_MAL_TYPE_S 26
#define GL_MDET_TX_PQM_MAL_TYPE_M ICE_M(0x1F, 26)
#define GL_MDET_TX_PQM_VALID_M BIT(31)
-#define GL_MDET_TX_TCLAN 0x000FC068
+#define GL_MDET_TX_TCLAN_BY_MAC(hw) \
+ ((hw)->mac_type == ICE_MAC_E830 ? E830_GL_MDET_TX_TCLAN : \
+ E800_GL_MDET_TX_TCLAN)
+#define E800_GL_MDET_TX_TCLAN 0x000FC068
+#define E830_GL_MDET_TX_TCLAN 0x000FCCC0
#define GL_MDET_TX_TCLAN_QNUM_S 0
#define GL_MDET_TX_TCLAN_QNUM_M ICE_M(0x7FFF, 0)
#define GL_MDET_TX_TCLAN_VF_NUM_S 15
@@ -325,7 +330,11 @@
#define PF_MDET_RX_VALID_M BIT(0)
#define PF_MDET_TX_PQM 0x002D2C80
#define PF_MDET_TX_PQM_VALID_M BIT(0)
-#define PF_MDET_TX_TCLAN 0x000FC000
+#define PF_MDET_TX_TCLAN_BY_MAC(hw) \
+ ((hw)->mac_type == ICE_MAC_E830 ? E830_PF_MDET_TX_TCLAN : \
+ E800_PF_MDET_TX_TCLAN)
+#define E800_PF_MDET_TX_TCLAN 0x000FC000
+#define E830_PF_MDET_TX_TCLAN 0x000FCC00
#define PF_MDET_TX_TCLAN_VALID_M BIT(0)
#define VP_MDET_RX(_VF) (0x00294400 + ((_VF) * 4))
#define VP_MDET_RX_VALID_M BIT(0)
@@ -335,6 +344,10 @@
#define VP_MDET_TX_TCLAN_VALID_M BIT(0)
#define VP_MDET_TX_TDPU(_VF) (0x00040000 + ((_VF) * 4))
#define VP_MDET_TX_TDPU_VALID_M BIT(0)
+#define E800_GL_MNG_FWSM_FW_MODES_M GENMASK(2, 0)
+#define E830_GL_MNG_FWSM_FW_MODES_M GENMASK(1, 0)
+#define GL_MNG_FWSM 0x000B6134
+#define GL_MNG_FWSM_FW_LOADING_M BIT(30)
#define GLNVM_FLA 0x000B6108
#define GLNVM_FLA_LOCKED_M BIT(6)
#define GLNVM_GENS 0x000B6100
@@ -361,13 +374,18 @@
#define GL_PWR_MODE_CTL_CAR_MAX_BW_S 30
#define GL_PWR_MODE_CTL_CAR_MAX_BW_M ICE_M(0x3, 30)
#define GLQF_FD_CNT 0x00460018
+#define E800_GLQF_FD_CNT_FD_GCNT_M GENMASK(14, 0)
+#define E830_GLQF_FD_CNT_FD_GCNT_M GENMASK(15, 0)
#define GLQF_FD_CNT_FD_BCNT_S 16
-#define GLQF_FD_CNT_FD_BCNT_M ICE_M(0x7FFF, 16)
+#define E800_GLQF_FD_CNT_FD_BCNT_M GENMASK(30, 16)
+#define E830_GLQF_FD_CNT_FD_BCNT_M GENMASK(31, 16)
#define GLQF_FD_SIZE 0x00460010
#define GLQF_FD_SIZE_FD_GSIZE_S 0
-#define GLQF_FD_SIZE_FD_GSIZE_M ICE_M(0x7FFF, 0)
+#define E800_GLQF_FD_SIZE_FD_GSIZE_M GENMASK(14, 0)
+#define E830_GLQF_FD_SIZE_FD_GSIZE_M GENMASK(15, 0)
#define GLQF_FD_SIZE_FD_BSIZE_S 16
-#define GLQF_FD_SIZE_FD_BSIZE_M ICE_M(0x7FFF, 16)
+#define E800_GLQF_FD_SIZE_FD_BSIZE_M GENMASK(30, 16)
+#define E830_GLQF_FD_SIZE_FD_BSIZE_M GENMASK(31, 16)
#define GLQF_FDINSET(_i, _j) (0x00412000 + ((_i) * 4 + (_j) * 512))
#define GLQF_FDMASK(_i) (0x00410800 + ((_i) * 4))
#define GLQF_FDMASK_MAX_INDEX 31
@@ -386,6 +404,10 @@
#define GLQF_HMASK_SEL(_i) (0x00410000 + ((_i) * 4))
#define GLQF_HMASK_SEL_MAX_INDEX 127
#define GLQF_HMASK_SEL_MASK_SEL_S 0
+#define E800_PFQF_FD_CNT_FD_GCNT_M GENMASK(14, 0)
+#define E830_PFQF_FD_CNT_FD_GCNT_M GENMASK(15, 0)
+#define E800_PFQF_FD_CNT_FD_BCNT_M GENMASK(30, 16)
+#define E830_PFQF_FD_CNT_FD_BCNT_M GENMASK(31, 16)
#define PFQF_FD_ENA 0x0043A000
#define PFQF_FD_ENA_FD_ENA_M BIT(0)
#define PFQF_FD_SIZE 0x00460100
@@ -476,6 +498,7 @@
#define GLTSYN_SYNC_DLAY 0x00088818
#define GLTSYN_TGT_H_0(_i) (0x00088930 + ((_i) * 4))
#define GLTSYN_TGT_L_0(_i) (0x00088928 + ((_i) * 4))
+#define GLTSYN_TIME_0(_i) (0x000888C8 + ((_i) * 4))
#define GLTSYN_TIME_H(_i) (0x000888D8 + ((_i) * 4))
#define GLTSYN_TIME_L(_i) (0x000888D0 + ((_i) * 4))
#define PFHH_SEM 0x000A4200 /* Reset Source: PFR */
@@ -484,12 +507,13 @@
#define PFTSYN_SEM_BUSY_M BIT(0)
#define VSIQF_FD_CNT(_VSI) (0x00464000 + ((_VSI) * 4))
#define VSIQF_FD_CNT_FD_GCNT_S 0
-#define VSIQF_FD_CNT_FD_GCNT_M ICE_M(0x3FFF, 0)
+#define E800_VSIQF_FD_CNT_FD_GCNT_M GENMASK(13, 0)
+#define E830_VSIQF_FD_CNT_FD_GCNT_M GENMASK(15, 0)
#define VSIQF_FD_CNT_FD_BCNT_S 16
-#define VSIQF_FD_CNT_FD_BCNT_M ICE_M(0x3FFF, 16)
+#define E800_VSIQF_FD_CNT_FD_BCNT_M GENMASK(29, 16)
+#define E830_VSIQF_FD_CNT_FD_BCNT_M GENMASK(31, 16)
#define VSIQF_FD_SIZE(_VSI) (0x00462000 + ((_VSI) * 4))
#define VSIQF_HKEY_MAX_INDEX 12
-#define VSIQF_HLUT_MAX_INDEX 15
#define PFPM_APM 0x000B8080
#define PFPM_APM_APME_M BIT(0)
#define PFPM_WUFC 0x0009DC00
@@ -499,6 +523,10 @@
#define PFPM_WUS_MAG_M BIT(1)
#define PFPM_WUS_MNG_M BIT(3)
#define PFPM_WUS_FW_RST_WK_M BIT(31)
+#define E830_PRTMAC_CL01_PS_QNT 0x001E32A0
+#define E830_PRTMAC_CL01_PS_QNT_CL0_M GENMASK(15, 0)
+#define E830_PRTMAC_CL01_QNT_THR 0x001E3320
+#define E830_PRTMAC_CL01_QNT_THR_CL0_M GENMASK(15, 0)
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
#define VFINT_DYN_CTLN_CLEARPBA_M BIT(1)
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 5a7753bda324..b47cd43ae871 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -4,8 +4,27 @@
/* Link Aggregation code */
#include "ice.h"
+#include "ice_lib.h"
#include "ice_lag.h"
+#define ICE_LAG_RES_SHARED BIT(14)
+#define ICE_LAG_RES_VALID BIT(15)
+
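+/* template frame carrying the Slow Protocols ethertype (0x8809) used by
+ * LACP; the control packet steering rule is built from it
+ */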
+#define LACP_TRAIN_PKT_LEN 16
+static const u8 lacp_train_pkt[LACP_TRAIN_PKT_LEN] = { 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0x88, 0x09, 0, 0 };
+
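+/* raw 64-byte switch recipe images (struct ice_aqc_recipe_data_elem layout):
+ * ice_dflt_vsi_rcp forwards matched traffic to a VSI, while ice_lport_rcp
+ * matches on source logical port for per-port drop rules
+ */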
+#define ICE_RECIPE_LEN 64
+static const u8 ice_dflt_vsi_rcp[ICE_RECIPE_LEN] = {
+ 0x05, 0, 0, 0, 0x20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0x85, 0, 0x01, 0, 0, 0, 0xff, 0xff, 0x08, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0x30 };
+static const u8 ice_lport_rcp[ICE_RECIPE_LEN] = {
+ 0x05, 0, 0, 0, 0x20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0x85, 0, 0x16, 0, 0, 0, 0xff, 0xff, 0x07, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0x30 };
+
/**
* ice_lag_set_primary - set PF LAG state as Primary
* @lag: LAG info struct
@@ -47,16 +66,257 @@ static void ice_lag_set_backup(struct ice_lag *lag)
}
/**
+ * netif_is_same_ice - determine if netdev is on the same ice NIC as local PF
+ * @pf: local PF struct
+ * @netdev: netdev we are evaluating
+ */
+static bool netif_is_same_ice(struct ice_pf *pf, struct net_device *netdev)
+{
+ struct ice_netdev_priv *np;
+ struct ice_pf *test_pf;
+ struct ice_vsi *vsi;
+
+ if (!netif_is_ice(netdev))
+ return false;
+
+ np = netdev_priv(netdev);
+ if (!np)
+ return false;
+
+ vsi = np->vsi;
+ if (!vsi)
+ return false;
+
+ test_pf = vsi->back;
+ if (!test_pf)
+ return false;
+
+ if (pf->pdev->bus != test_pf->pdev->bus ||
+ pf->pdev->slot != test_pf->pdev->slot)
+ return false;
+
+ return true;
+}
+
+/**
+ * ice_netdev_to_lag - return pointer to associated lag struct from netdev
+ * @netdev: pointer to net_device struct to query
+ */
+static struct ice_lag *ice_netdev_to_lag(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np;
+ struct ice_vsi *vsi;
+
+ if (!netif_is_ice(netdev))
+ return NULL;
+
+ np = netdev_priv(netdev);
+ if (!np)
+ return NULL;
+
+ vsi = np->vsi;
+ if (!vsi)
+ return NULL;
+
+ return vsi->back->lag;
+}
+
+/**
+ * ice_lag_find_hw_by_lport - return a hw struct matching a bond member's lport
+ * @lag: lag struct
+ * @lport: lport value to search for
+ */
+static struct ice_hw *
+ice_lag_find_hw_by_lport(struct ice_lag *lag, u8 lport)
+{
+ struct ice_lag_netdev_list *entry;
+ struct net_device *tmp_netdev;
+ struct ice_netdev_priv *np;
+ struct ice_hw *hw;
+
+ list_for_each_entry(entry, lag->netdev_head, node) {
+ tmp_netdev = entry->netdev;
+ if (!tmp_netdev || !netif_is_ice(tmp_netdev))
+ continue;
+
+ np = netdev_priv(tmp_netdev);
+ if (!np || !np->vsi)
+ continue;
+
+ hw = &np->vsi->back->hw;
+ if (hw->port_info->lport == lport)
+ return hw;
+ }
+
+ return NULL;
+}
+
+/**
+ * ice_lag_find_primary - return pointer to the primary interface's lag struct
+ * @lag: local interface's lag struct
+ */
+static struct ice_lag *ice_lag_find_primary(struct ice_lag *lag)
+{
+ struct ice_lag *primary_lag = NULL;
+ struct list_head *tmp;
+
+ list_for_each(tmp, lag->netdev_head) {
+ struct ice_lag_netdev_list *entry;
+ struct ice_lag *tmp_lag;
+
+ entry = list_entry(tmp, struct ice_lag_netdev_list, node);
+ tmp_lag = ice_netdev_to_lag(entry->netdev);
+ if (tmp_lag && tmp_lag->primary) {
+ primary_lag = tmp_lag;
+ break;
+ }
+ }
+
+ return primary_lag;
+}
+
+/**
+ * ice_lag_cfg_fltr - Add/Remove rule for LAG
+ * @lag: lag struct for local interface
+ * @act: rule action
+ * @recipe_id: recipe id for the new rule
+ * @rule_idx: pointer to rule index
+ * @add: boolean on whether we are adding filters
+ */
+static int
+ice_lag_cfg_fltr(struct ice_lag *lag, u32 act, u16 recipe_id, u16 *rule_idx,
+ bool add)
+{
+ struct ice_sw_rule_lkup_rx_tx *s_rule;
+ u16 s_rule_sz, vsi_num;
+ struct ice_hw *hw;
+ u8 *eth_hdr;
+ u32 opc;
+ int err;
+
+ hw = &lag->pf->hw;
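+ /* LAG rules are built against the PF's main VSI (software index 0) */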
+ vsi_num = ice_get_hw_vsi_num(hw, 0);
+
+ s_rule_sz = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule);
+ s_rule = kzalloc(s_rule_sz, GFP_KERNEL);
+ if (!s_rule) {
+ dev_err(ice_pf_to_dev(lag->pf), "error allocating rule for LAG\n");
+ return -ENOMEM;
+ }
+
+ if (add) {
+ eth_hdr = s_rule->hdr_data;
+ ice_fill_eth_hdr(eth_hdr);
+
+ act |= (vsi_num << ICE_SINGLE_ACT_VSI_ID_S) &
+ ICE_SINGLE_ACT_VSI_ID_M;
+
+ s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
+ s_rule->recipe_id = cpu_to_le16(recipe_id);
+ s_rule->src = cpu_to_le16(hw->port_info->lport);
+ s_rule->act = cpu_to_le32(act);
+ s_rule->hdr_len = cpu_to_le16(DUMMY_ETH_HDR_LEN);
+ opc = ice_aqc_opc_add_sw_rules;
+ } else {
+ s_rule->index = cpu_to_le16(*rule_idx);
+ opc = ice_aqc_opc_remove_sw_rules;
+ }
+
+ err = ice_aq_sw_rules(&lag->pf->hw, s_rule, s_rule_sz, 1, opc, NULL);
+ if (err)
+ goto dflt_fltr_free;
+
+ if (add)
+ *rule_idx = le16_to_cpu(s_rule->index);
+ else
+ *rule_idx = 0;
+
+dflt_fltr_free:
+ kfree(s_rule);
+ return err;
+}
+
+/**
+ * ice_lag_cfg_dflt_fltr - Add/Remove default VSI rule for LAG
+ * @lag: lag struct for local interface
+ * @add: boolean on whether to add filter
+ */
+static int
+ice_lag_cfg_dflt_fltr(struct ice_lag *lag, bool add)
+{
+ u32 act = ICE_SINGLE_ACT_VSI_FORWARDING |
+ ICE_SINGLE_ACT_VALID_BIT | ICE_SINGLE_ACT_LAN_ENABLE;
+
+ return ice_lag_cfg_fltr(lag, act, lag->pf_recipe,
+ &lag->pf_rule_id, add);
+}
+
+/**
+ * ice_lag_cfg_drop_fltr - Add/Remove lport drop rule
+ * @lag: lag struct for local interface
+ * @add: boolean on whether to add filter
+ */
+static int
+ice_lag_cfg_drop_fltr(struct ice_lag *lag, bool add)
+{
+ u32 act = ICE_SINGLE_ACT_VSI_FORWARDING |
+ ICE_SINGLE_ACT_VALID_BIT |
+ ICE_SINGLE_ACT_DROP;
+
+ return ice_lag_cfg_fltr(lag, act, lag->lport_recipe,
+ &lag->lport_rule_idx, add);
+}
+
+/**
+ * ice_lag_cfg_pf_fltrs - set filters up for new active port
+ * @lag: local interface's lag struct
+ * @ptr: opaque data containing notifier event
+ */
+static void
+ice_lag_cfg_pf_fltrs(struct ice_lag *lag, void *ptr)
+{
+ struct netdev_notifier_bonding_info *info;
+ struct netdev_bonding_info *bonding_info;
+ struct net_device *event_netdev;
+ struct device *dev;
+
+ event_netdev = netdev_notifier_info_to_dev(ptr);
+ /* not for this netdev */
+ if (event_netdev != lag->netdev)
+ return;
+
+ info = (struct netdev_notifier_bonding_info *)ptr;
+ bonding_info = &info->bonding_info;
+ dev = ice_pf_to_dev(lag->pf);
+
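+ /* bonding reports slave.state != 0 for a backup (inactive) port and
+ * 0 for the active port
+ */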
+ /* interface not active - remove old default VSI rule */
+ if (bonding_info->slave.state && lag->pf_rule_id) {
+ if (ice_lag_cfg_dflt_fltr(lag, false))
+ dev_err(dev, "Error removing old default VSI filter\n");
+ if (ice_lag_cfg_drop_fltr(lag, true))
+ dev_err(dev, "Error adding new drop filter\n");
+ return;
+ }
+
+ /* interface becoming active - add new default VSI rule */
+ if (!bonding_info->slave.state && !lag->pf_rule_id) {
+ if (ice_lag_cfg_dflt_fltr(lag, true))
+ dev_err(dev, "Error adding new default VSI filter\n");
+ if (lag->lport_rule_idx && ice_lag_cfg_drop_fltr(lag, false))
+ dev_err(dev, "Error removing old drop filter\n");
+ }
+}
+
+/**
* ice_display_lag_info - print LAG info
* @lag: LAG info struct
*/
static void ice_display_lag_info(struct ice_lag *lag)
{
- const char *name, *peer, *upper, *role, *bonded, *primary;
+ const char *name, *upper, *role, *bonded, *primary;
struct device *dev = &lag->pf->pdev->dev;
name = lag->netdev ? netdev_name(lag->netdev) : "unset";
- peer = lag->peer_netdev ? netdev_name(lag->peer_netdev) : "unset";
upper = lag->upper_netdev ? netdev_name(lag->upper_netdev) : "unset";
primary = lag->primary ? "TRUE" : "FALSE";
bonded = lag->bonded ? "BONDED" : "UNBONDED";
@@ -78,8 +338,445 @@ static void ice_display_lag_info(struct ice_lag *lag)
role = "ERROR";
}
- dev_dbg(dev, "%s %s, peer:%s, upper:%s, role:%s, primary:%s\n", name,
- bonded, peer, upper, role, primary);
+ dev_dbg(dev, "%s %s, upper:%s, role:%s, primary:%s\n", name, bonded,
+ upper, role, primary);
+}
+
+/**
+ * ice_lag_qbuf_recfg - generate a buffer of queues for a reconfigure command
+ * @hw: HW struct that contains the queue contexts
+ * @qbuf: pointer to buffer to populate
+ * @vsi_num: index of the VSI in PF space
+ * @numq: number of queues to search for
+ * @tc: traffic class that contains the queues
+ *
+ * Returns the number of valid queues placed in the buffer
+ */
+static u16
+ice_lag_qbuf_recfg(struct ice_hw *hw, struct ice_aqc_cfg_txqs_buf *qbuf,
+ u16 vsi_num, u16 numq, u8 tc)
+{
+ struct ice_q_ctx *q_ctx;
+ u16 qid, count = 0;
+ struct ice_pf *pf;
+ int i;
+
+ pf = hw->back;
+ for (i = 0; i < numq; i++) {
+ q_ctx = ice_get_lan_q_ctx(hw, vsi_num, tc, i);
+ if (!q_ctx) {
+ dev_dbg(ice_hw_to_dev(hw), "%s queue %d NO Q CONTEXT\n",
+ __func__, i);
+ continue;
+ }
+ if (q_ctx->q_teid == ICE_INVAL_TEID) {
+ dev_dbg(ice_hw_to_dev(hw), "%s queue %d INVAL TEID\n",
+ __func__, i);
+ continue;
+ }
+ if (q_ctx->q_handle == ICE_INVAL_Q_HANDLE) {
+ dev_dbg(ice_hw_to_dev(hw), "%s queue %d INVAL Q HANDLE\n",
+ __func__, i);
+ continue;
+ }
+
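+ /* translate the VSI-relative queue handle into the PF-wide Tx queue id */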
+ qid = pf->vsi[vsi_num]->txq_map[q_ctx->q_handle];
+ qbuf->queue_info[count].q_handle = cpu_to_le16(qid);
+ qbuf->queue_info[count].tc = tc;
+ qbuf->queue_info[count].q_teid = cpu_to_le32(q_ctx->q_teid);
+ count++;
+ }
+
+ return count;
+}
+
+/**
+ * ice_lag_get_sched_parent - locate or create a sched node parent
+ * @hw: HW struct for getting parent in
+ * @tc: traffic class on parent/node
+ */
+static struct ice_sched_node *
+ice_lag_get_sched_parent(struct ice_hw *hw, u8 tc)
+{
+ struct ice_sched_node *tc_node, *aggnode, *parent = NULL;
+ u16 num_nodes[ICE_AQC_TOPO_MAX_LEVEL_NUM] = { 0 };
+ struct ice_port_info *pi = hw->port_info;
+ struct device *dev;
+ u8 aggl, vsil;
+ int n;
+
+ dev = ice_hw_to_dev(hw);
+
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (!tc_node) {
+ dev_warn(dev, "Failure to find TC node for LAG move\n");
+ return parent;
+ }
+
+ aggnode = ice_sched_get_agg_node(pi, tc_node, ICE_DFLT_AGG_ID);
+ if (!aggnode) {
+ dev_warn(dev, "Failure to find aggregate node for LAG move\n");
+ return parent;
+ }
+
+ aggl = ice_sched_get_agg_layer(hw);
+ vsil = ice_sched_get_vsi_layer(hw);
+
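+ /* request one new node at each layer between the aggregator and VSI layers */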
+ for (n = aggl + 1; n < vsil; n++)
+ num_nodes[n] = 1;
+
+ for (n = 0; n < aggnode->num_children; n++) {
+ parent = ice_sched_get_free_vsi_parent(hw, aggnode->children[n],
+ num_nodes);
+ if (parent)
+ return parent;
+ }
+
+ /* if free parent not found - add one */
+ parent = aggnode;
+ for (n = aggl + 1; n < vsil; n++) {
+ u16 num_nodes_added;
+ u32 first_teid;
+ int err;
+
+ err = ice_sched_add_nodes_to_layer(pi, tc_node, parent, n,
+ num_nodes[n], &first_teid,
+ &num_nodes_added);
+ if (err || num_nodes[n] != num_nodes_added)
+ return NULL;
+
+ if (num_nodes_added)
+ parent = ice_sched_find_node_by_teid(tc_node,
+ first_teid);
+ else
+ parent = parent->children[0];
+ if (!parent) {
+ dev_warn(dev, "Failure to add new parent for LAG move\n");
+ return parent;
+ }
+ }
+
+ return parent;
+}
+
+/**
+ * ice_lag_move_vf_node_tc - move scheduling nodes for one VF on one TC
+ * @lag: lag info struct
+ * @oldport: lport of previous nodes location
+ * @newport: lport of destination nodes location
+ * @vsi_num: array index of VSI in PF space
+ * @tc: traffic class to move
+ */
+static void
+ice_lag_move_vf_node_tc(struct ice_lag *lag, u8 oldport, u8 newport,
+ u16 vsi_num, u8 tc)
+{
+ DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+ struct device *dev = ice_pf_to_dev(lag->pf);
+ u16 numq, valq, num_moved, qbuf_size;
+ u16 buf_size = __struct_size(buf);
+ struct ice_aqc_cfg_txqs_buf *qbuf;
+ struct ice_sched_node *n_prt;
+ struct ice_hw *new_hw = NULL;
+ __le32 teid, parent_teid;
+ struct ice_vsi_ctx *ctx;
+ u32 tmp_teid;
+
+ ctx = ice_get_vsi_ctx(&lag->pf->hw, vsi_num);
+ if (!ctx) {
+ dev_warn(dev, "Unable to locate VSI context for LAG failover\n");
+ return;
+ }
+
+ /* check to see if this VF is enabled on this TC */
+ if (!ctx->sched.vsi_node[tc])
+ return;
+
+ /* locate HW struct for destination port */
+ new_hw = ice_lag_find_hw_by_lport(lag, newport);
+ if (!new_hw) {
+ dev_warn(dev, "Unable to locate HW struct for LAG node destination\n");
+ return;
+ }
+
+ numq = ctx->num_lan_q_entries[tc];
+ teid = ctx->sched.vsi_node[tc]->info.node_teid;
+ tmp_teid = le32_to_cpu(teid);
+ parent_teid = ctx->sched.vsi_node[tc]->info.parent_teid;
+ /* if no teid assigned or numq == 0, then this TC is not active */
+ if (!tmp_teid || !numq)
+ return;
+
+ /* suspend VSI subtree for Traffic Class "tc" on
+ * this VF's VSI
+ */
+ if (ice_sched_suspend_resume_elems(&lag->pf->hw, 1, &tmp_teid, true))
+ dev_dbg(dev, "Problem suspending traffic for LAG node move\n");
+
+ /* reconfigure all VF's queues on this Traffic Class
+ * to new port
+ */
+ qbuf_size = struct_size(qbuf, queue_info, numq);
+ qbuf = kzalloc(qbuf_size, GFP_KERNEL);
+ if (!qbuf) {
+ dev_warn(dev, "Failure allocating memory for VF queue recfg buffer\n");
+ goto resume_traffic;
+ }
+
+ /* add the per queue info for the reconfigure command buffer */
+ valq = ice_lag_qbuf_recfg(&lag->pf->hw, qbuf, vsi_num, numq, tc);
+ if (!valq) {
+ dev_dbg(dev, "No valid queues found for LAG failover\n");
+ goto qbuf_none;
+ }
+
+ if (ice_aq_cfg_lan_txq(&lag->pf->hw, qbuf, qbuf_size, valq, oldport,
+ newport, NULL)) {
+ dev_warn(dev, "Failure to configure queues for LAG failover\n");
+ goto qbuf_err;
+ }
+
+qbuf_none:
+ kfree(qbuf);
+
+ /* find new parent in destination port's tree for VF VSI node on this
+ * Traffic Class
+ */
+ n_prt = ice_lag_get_sched_parent(new_hw, tc);
+ if (!n_prt)
+ goto resume_traffic;
+
+ /* Move Vf's VSI node for this TC to newport's scheduler tree */
+ buf->hdr.src_parent_teid = parent_teid;
+ buf->hdr.dest_parent_teid = n_prt->info.node_teid;
+ buf->hdr.num_elems = cpu_to_le16(1);
+ buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
+ buf->teid[0] = teid;
+
+ if (ice_aq_move_sched_elems(&lag->pf->hw, buf, buf_size, &num_moved))
+ dev_warn(dev, "Failure to move VF nodes for failover\n");
+ else
+ ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);
+
+ goto resume_traffic;
+
+qbuf_err:
+ kfree(qbuf);
+
+resume_traffic:
+ /* restart traffic for VSI node */
+ if (ice_sched_suspend_resume_elems(&lag->pf->hw, 1, &tmp_teid, false))
+ dev_dbg(dev, "Problem restarting traffic for LAG node move\n");
+}
+
+/**
+ * ice_lag_build_netdev_list - populate the lag struct's netdev list
+ * @lag: local lag struct
+ * @ndlist: pointer to netdev list to populate
+ */
+static void ice_lag_build_netdev_list(struct ice_lag *lag,
+ struct ice_lag_netdev_list *ndlist)
+{
+ struct ice_lag_netdev_list *nl;
+ struct net_device *tmp_nd;
+
+ INIT_LIST_HEAD(&ndlist->node);
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
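+ /* atomic allocation - we are inside an RCU read-side critical section */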
+ nl = kzalloc(sizeof(*nl), GFP_ATOMIC);
+ if (!nl)
+ break;
+
+ nl->netdev = tmp_nd;
+ list_add(&nl->node, &ndlist->node);
+ }
+ rcu_read_unlock();
+ lag->netdev_head = &ndlist->node;
+}
+
+/**
+ * ice_lag_destroy_netdev_list - free lag struct's netdev list
+ * @lag: pointer to local lag struct
+ * @ndlist: pointer to lag struct netdev list
+ */
+static void ice_lag_destroy_netdev_list(struct ice_lag *lag,
+ struct ice_lag_netdev_list *ndlist)
+{
+ struct ice_lag_netdev_list *entry, *n;
+
+ rcu_read_lock();
+ list_for_each_entry_safe(entry, n, &ndlist->node, node) {
+ list_del(&entry->node);
+ kfree(entry);
+ }
+ rcu_read_unlock();
+ lag->netdev_head = NULL;
+}
+
+/**
+ * ice_lag_move_single_vf_nodes - Move Tx scheduling nodes for single VF
+ * @lag: primary interface LAG struct
+ * @oldport: lport of previous interface
+ * @newport: lport of destination interface
+ * @vsi_num: SW index of VF's VSI
+ */
+static void
+ice_lag_move_single_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport,
+ u16 vsi_num)
+{
+ u8 tc;
+
+ ice_for_each_traffic_class(tc)
+ ice_lag_move_vf_node_tc(lag, oldport, newport, vsi_num, tc);
+}
+
+/**
+ * ice_lag_move_new_vf_nodes - Move Tx scheduling nodes for a VF if required
+ * @vf: the VF to move Tx nodes for
+ *
+ * Called just after configuring new VF queues. Check whether the VF Tx
+ * scheduling nodes need to be updated to fail over to the active port. If so,
+ * move them now.
+ */
+void ice_lag_move_new_vf_nodes(struct ice_vf *vf)
+{
+ struct ice_lag_netdev_list ndlist;
+ u8 pri_port, act_port;
+ struct ice_lag *lag;
+ struct ice_vsi *vsi;
+ struct ice_pf *pf;
+
+ vsi = ice_get_vf_vsi(vf);
+
+ if (WARN_ON(!vsi))
+ return;
+
+ if (WARN_ON(vsi->type != ICE_VSI_VF))
+ return;
+
+ pf = vf->pf;
+ lag = pf->lag;
+
+ mutex_lock(&pf->lag_mutex);
+ if (!lag->bonded)
+ goto new_vf_unlock;
+
+ pri_port = pf->hw.port_info->lport;
+ act_port = lag->active_port;
+
+ if (lag->upper_netdev)
+ ice_lag_build_netdev_list(lag, &ndlist);
+
+ if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) &&
+ lag->bonded && lag->primary && pri_port != act_port &&
+ !list_empty(lag->netdev_head))
+ ice_lag_move_single_vf_nodes(lag, pri_port, act_port, vsi->idx);
+
+ ice_lag_destroy_netdev_list(lag, &ndlist);
+
+new_vf_unlock:
+ mutex_unlock(&pf->lag_mutex);
+}
+
+/**
+ * ice_lag_move_vf_nodes - move Tx scheduling nodes for all VFs to new port
+ * @lag: lag info struct
+ * @oldport: lport of previous interface
+ * @newport: lport of destination interface
+ */
+static void ice_lag_move_vf_nodes(struct ice_lag *lag, u8 oldport, u8 newport)
+{
+ struct ice_pf *pf;
+ int i;
+
+ if (!lag->primary)
+ return;
+
+ pf = lag->pf;
+ ice_for_each_vsi(pf, i)
+ if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF ||
+ pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL))
+ ice_lag_move_single_vf_nodes(lag, oldport, newport, i);
+}
+
+/**
+ * ice_lag_move_vf_nodes_cfg - move vf nodes outside LAG netdev event context
+ * @lag: local lag struct
+ * @src_prt: lport value for source port
+ * @dst_prt: lport value for destination port
+ *
+ * This function is used to move nodes during an out-of-netdev-event situation,
+ * primarily when the driver needs to reconfigure or recreate resources.
+ *
+ * Must be called while holding the lag_mutex to prevent lag events from
+ * processing while out-of-sync moves are happening. Also, paired moves,
+ * such as those used in a reset flow, should both be made under the same
+ * mutex lock to avoid changes between the start and end of the reset.
+ */
+void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt)
+{
+ struct ice_lag_netdev_list ndlist;
+
+ ice_lag_build_netdev_list(lag, &ndlist);
+ ice_lag_move_vf_nodes(lag, src_prt, dst_prt);
+ ice_lag_destroy_netdev_list(lag, &ndlist);
+}
+
+#define ICE_LAG_SRIOV_CP_RECIPE 10
+#define ICE_LAG_SRIOV_TRAIN_PKT_LEN 16
+
+/**
+ * ice_lag_cfg_cp_fltr - configure filter for control packets
+ * @lag: local interface's lag struct
+ * @add: add or remove rule
+ */
+static void
+ice_lag_cfg_cp_fltr(struct ice_lag *lag, bool add)
+{
+ struct ice_sw_rule_lkup_rx_tx *s_rule = NULL;
+ struct ice_vsi *vsi;
+ u16 buf_len, opc;
+
+ vsi = lag->pf->vsi[0];
+
+ buf_len = ICE_SW_RULE_RX_TX_HDR_SIZE(s_rule,
+ ICE_LAG_SRIOV_TRAIN_PKT_LEN);
+ s_rule = kzalloc(buf_len, GFP_KERNEL);
+ if (!s_rule) {
+ netdev_warn(lag->netdev, "-ENOMEM error configuring CP filter\n");
+ return;
+ }
+
+ if (add) {
+ s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
+ s_rule->recipe_id = cpu_to_le16(ICE_LAG_SRIOV_CP_RECIPE);
+ s_rule->src = cpu_to_le16(vsi->port_info->lport);
+ s_rule->act = cpu_to_le32(ICE_FWD_TO_VSI |
+ ICE_SINGLE_ACT_LAN_ENABLE |
+ ICE_SINGLE_ACT_VALID_BIT |
+ ((vsi->vsi_num <<
+ ICE_SINGLE_ACT_VSI_ID_S) &
+ ICE_SINGLE_ACT_VSI_ID_M));
+ s_rule->hdr_len = cpu_to_le16(ICE_LAG_SRIOV_TRAIN_PKT_LEN);
+ memcpy(s_rule->hdr_data, lacp_train_pkt, LACP_TRAIN_PKT_LEN);
+ opc = ice_aqc_opc_add_sw_rules;
+ } else {
+ opc = ice_aqc_opc_remove_sw_rules;
+ s_rule->index = cpu_to_le16(lag->cp_rule_idx);
+ }
+ if (ice_aq_sw_rules(&lag->pf->hw, s_rule, buf_len, 1, opc, NULL)) {
+ netdev_warn(lag->netdev, "Error %s CP rule for fail-over\n",
+ add ? "ADDING" : "REMOVING");
+ goto cp_free;
+ }
+
+ if (add)
+ lag->cp_rule_idx = le16_to_cpu(s_rule->index);
+ else
+ lag->cp_rule_idx = 0;
+
+cp_free:
+ kfree(s_rule);
}
/**
@@ -124,117 +821,422 @@ lag_out:
}
/**
+ * ice_lag_reclaim_vf_tc - move scheduling nodes back to primary interface
+ * @lag: primary interface lag struct
+ * @src_hw: HW struct current node location
+ * @vsi_num: VSI index in PF space
+ * @tc: traffic class to move
+ */
+static void
+ice_lag_reclaim_vf_tc(struct ice_lag *lag, struct ice_hw *src_hw, u16 vsi_num,
+ u8 tc)
+{
+ DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+ struct device *dev = ice_pf_to_dev(lag->pf);
+ u16 numq, valq, num_moved, qbuf_size;
+ u16 buf_size = __struct_size(buf);
+ struct ice_aqc_cfg_txqs_buf *qbuf;
+ struct ice_sched_node *n_prt;
+ __le32 teid, parent_teid;
+ struct ice_vsi_ctx *ctx;
+ struct ice_hw *hw;
+ u32 tmp_teid;
+
+ hw = &lag->pf->hw;
+ ctx = ice_get_vsi_ctx(hw, vsi_num);
+ if (!ctx) {
+ dev_warn(dev, "Unable to locate VSI context for LAG reclaim\n");
+ return;
+ }
+
+ /* check to see if this VF is enabled on this TC */
+ if (!ctx->sched.vsi_node[tc])
+ return;
+
+ numq = ctx->num_lan_q_entries[tc];
+ teid = ctx->sched.vsi_node[tc]->info.node_teid;
+ tmp_teid = le32_to_cpu(teid);
+ parent_teid = ctx->sched.vsi_node[tc]->info.parent_teid;
+
+ /* if !teid or !numq, then this TC is not active */
+ if (!tmp_teid || !numq)
+ return;
+
+ /* suspend traffic */
+ if (ice_sched_suspend_resume_elems(hw, 1, &tmp_teid, true))
+ dev_dbg(dev, "Problem suspending traffic for LAG node move\n");
+
+ /* reconfig queues for new port */
+ qbuf_size = struct_size(qbuf, queue_info, numq);
+ qbuf = kzalloc(qbuf_size, GFP_KERNEL);
+ if (!qbuf) {
+ dev_warn(dev, "Failure allocating memory for VF queue recfg buffer\n");
+ goto resume_reclaim;
+ }
+
+ /* add the per queue info for the reconfigure command buffer */
+ valq = ice_lag_qbuf_recfg(hw, qbuf, vsi_num, numq, tc);
+ if (!valq) {
+ dev_dbg(dev, "No valid queues found for LAG reclaim\n");
+ goto reclaim_none;
+ }
+
+ if (ice_aq_cfg_lan_txq(hw, qbuf, qbuf_size, numq,
+ src_hw->port_info->lport, hw->port_info->lport,
+ NULL)) {
+ dev_warn(dev, "Failure to configure queues for LAG failover\n");
+ goto reclaim_qerr;
+ }
+
+reclaim_none:
+ kfree(qbuf);
+
+ /* find parent in primary tree */
+ n_prt = ice_lag_get_sched_parent(hw, tc);
+ if (!n_prt)
+ goto resume_reclaim;
+
+ /* Move node to new parent */
+ buf->hdr.src_parent_teid = parent_teid;
+ buf->hdr.dest_parent_teid = n_prt->info.node_teid;
+ buf->hdr.num_elems = cpu_to_le16(1);
+ buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
+ buf->teid[0] = teid;
+
+ if (ice_aq_move_sched_elems(&lag->pf->hw, buf, buf_size, &num_moved))
+ dev_warn(dev, "Failure to move VF nodes for LAG reclaim\n");
+ else
+ ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);
+
+ goto resume_reclaim;
+
+reclaim_qerr:
+ kfree(qbuf);
+
+resume_reclaim:
+ /* restart traffic */
+ if (ice_sched_suspend_resume_elems(hw, 1, &tmp_teid, false))
+ dev_warn(dev, "Problem restarting traffic for LAG node reclaim\n");
+}
+
+/**
+ * ice_lag_reclaim_vf_nodes - reclaim VF nodes when an interface leaves the bond
+ * @lag: primary interface lag struct
+ * @src_hw: HW struct for current node location
+ */
+static void
+ice_lag_reclaim_vf_nodes(struct ice_lag *lag, struct ice_hw *src_hw)
+{
+ struct ice_pf *pf;
+ int i, tc;
+
+ if (!lag->primary || !src_hw)
+ return;
+
+ pf = lag->pf;
+ ice_for_each_vsi(pf, i)
+ if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF ||
+ pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL))
+ ice_for_each_traffic_class(tc)
+ ice_lag_reclaim_vf_tc(lag, src_hw, i, tc);
+}
+
+/**
* ice_lag_link - handle LAG link event
* @lag: LAG info struct
- * @info: info from the netdev notifier
*/
-static void
-ice_lag_link(struct ice_lag *lag, struct netdev_notifier_changeupper_info *info)
+static void ice_lag_link(struct ice_lag *lag)
{
- struct net_device *netdev_tmp, *upper = info->upper_dev;
struct ice_pf *pf = lag->pf;
- int peers = 0;
if (lag->bonded)
dev_warn(ice_pf_to_dev(pf), "%s Already part of a bond\n",
netdev_name(lag->netdev));
- rcu_read_lock();
- for_each_netdev_in_bond_rcu(upper, netdev_tmp)
- peers++;
- rcu_read_unlock();
-
- if (lag->upper_netdev != upper) {
- dev_hold(upper);
- lag->upper_netdev = upper;
- }
-
- ice_clear_rdma_cap(pf);
-
lag->bonded = true;
lag->role = ICE_LAG_UNSET;
-
- /* if this is the first element in an LAG mark as primary */
- lag->primary = !!(peers == 1);
+ netdev_info(lag->netdev, "Shared SR-IOV resources in bond are active\n");
}
/**
* ice_lag_unlink - handle unlink event
* @lag: LAG info struct
- * @info: info from netdev notification
*/
-static void
-ice_lag_unlink(struct ice_lag *lag,
- struct netdev_notifier_changeupper_info *info)
+static void ice_lag_unlink(struct ice_lag *lag)
{
- struct net_device *netdev_tmp, *upper = info->upper_dev;
+ u8 pri_port, act_port, loc_port;
struct ice_pf *pf = lag->pf;
- bool found = false;
if (!lag->bonded) {
netdev_dbg(lag->netdev, "bonding unlink event on non-LAG netdev\n");
return;
}
- /* determine if we are in the new LAG config or not */
- rcu_read_lock();
- for_each_netdev_in_bond_rcu(upper, netdev_tmp) {
- if (netdev_tmp == lag->netdev) {
- found = true;
- break;
+ if (lag->primary) {
+ act_port = lag->active_port;
+ pri_port = lag->pf->hw.port_info->lport;
+ if (act_port != pri_port && act_port != ICE_LAG_INVALID_PORT)
+ ice_lag_move_vf_nodes(lag, act_port, pri_port);
+ lag->primary = false;
+ lag->active_port = ICE_LAG_INVALID_PORT;
+ } else {
+ struct ice_lag *primary_lag;
+
+ primary_lag = ice_lag_find_primary(lag);
+ if (primary_lag) {
+ act_port = primary_lag->active_port;
+ pri_port = primary_lag->pf->hw.port_info->lport;
+ loc_port = pf->hw.port_info->lport;
+ if (act_port == loc_port &&
+ act_port != ICE_LAG_INVALID_PORT) {
+ ice_lag_reclaim_vf_nodes(primary_lag,
+ &lag->pf->hw);
+ primary_lag->active_port = ICE_LAG_INVALID_PORT;
+ }
}
}
- rcu_read_unlock();
- if (found)
+ lag->bonded = false;
+ lag->role = ICE_LAG_NONE;
+ lag->upper_netdev = NULL;
+}
+
+/**
+ * ice_lag_link_unlink - helper function to call lag_link/unlink
+ * @lag: lag info struct
+ * @ptr: opaque pointer data
+ */
+static void ice_lag_link_unlink(struct ice_lag *lag, void *ptr)
+{
+ struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info = ptr;
+
+ if (netdev != lag->netdev)
return;
- if (lag->upper_netdev) {
- dev_put(lag->upper_netdev);
- lag->upper_netdev = NULL;
+ if (info->linking)
+ ice_lag_link(lag);
+ else
+ ice_lag_unlink(lag);
+}
+
+/**
+ * ice_lag_set_swid - set the SWID on secondary interface
+ * @primary_swid: primary interface's SWID
+ * @local_lag: local interface's LAG struct
+ * @link: is this a linking activity
+ *
+ * If link is false, primary_swid is not expected to be valid.
+ * This function should never be called in interrupt context.
+ */
+static void
+ice_lag_set_swid(u16 primary_swid, struct ice_lag *local_lag,
+ bool link)
+{
+ struct ice_aqc_alloc_free_res_elem *buf;
+ struct ice_aqc_set_port_params *cmd;
+ struct ice_aq_desc desc;
+ u16 buf_len, swid;
+ int status, i;
+
+ buf_len = struct_size(buf, elem, 1);
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf) {
+ dev_err(ice_pf_to_dev(local_lag->pf), "-ENOMEM error setting SWID\n");
+ return;
}
- lag->peer_netdev = NULL;
- ice_set_rdma_cap(pf);
- lag->bonded = false;
- lag->role = ICE_LAG_NONE;
+ buf->num_elems = cpu_to_le16(1);
+ buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_SWID);
+ /* if unlinnking need to free the shared resource */
+ if (!link && local_lag->bond_swid) {
+ buf->elem[0].e.sw_resp = cpu_to_le16(local_lag->bond_swid);
+ status = ice_aq_alloc_free_res(&local_lag->pf->hw, buf,
+ buf_len, ice_aqc_opc_free_res);
+ if (status)
+ dev_err(ice_pf_to_dev(local_lag->pf), "Error freeing SWID during LAG unlink\n");
+ local_lag->bond_swid = 0;
+ }
+
+ if (link) {
+ buf->res_type |= cpu_to_le16(ICE_LAG_RES_SHARED |
+ ICE_LAG_RES_VALID);
+ /* store the primary's SWID in case it leaves bond first */
+ local_lag->bond_swid = primary_swid;
+ buf->elem[0].e.sw_resp = cpu_to_le16(local_lag->bond_swid);
+ } else {
+ buf->elem[0].e.sw_resp =
+ cpu_to_le16(local_lag->pf->hw.port_info->sw_id);
+ }
+
+ status = ice_aq_alloc_free_res(&local_lag->pf->hw, buf, buf_len,
+ ice_aqc_opc_alloc_res);
+ if (status)
+ dev_err(ice_pf_to_dev(local_lag->pf), "Error subscribing to SWID 0x%04X\n",
+ local_lag->bond_swid);
+
+ kfree(buf);
+
+ /* Configure port param SWID to correct value */
+ if (link)
+ swid = primary_swid;
+ else
+ swid = local_lag->pf->hw.port_info->sw_id;
+
+ cmd = &desc.params.set_port_params;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
+
+ cmd->swid = cpu_to_le16(ICE_AQC_PORT_SWID_VALID | swid);
+ /* If this is happening in reset context, it is possible that the
+ * primary interface has not finished setting its SWID to SHARED
+ * yet. Allow retries to account for this timing issue between
+ * interfaces.
+ */
+ for (i = 0; i < ICE_LAG_RESET_RETRIES; i++) {
+ status = ice_aq_send_cmd(&local_lag->pf->hw, &desc, NULL, 0,
+ NULL);
+ if (!status)
+ break;
+
+ usleep_range(1000, 2000);
+ }
+
+ if (status)
+ dev_err(ice_pf_to_dev(local_lag->pf), "Error setting SWID in port params %d\n",
+ status);
}
/**
- * ice_lag_unregister - handle netdev unregister events
- * @lag: LAG info struct
- * @netdev: netdev reporting the event
+ * ice_lag_primary_swid - set/clear the SHARED attrib of primary's SWID
+ * @lag: primary interface's lag struct
+ * @link: is this a linking activity
+ *
+ * Implement setting primary SWID as shared using 0x020B
*/
-static void ice_lag_unregister(struct ice_lag *lag, struct net_device *netdev)
+static void ice_lag_primary_swid(struct ice_lag *lag, bool link)
{
- struct ice_pf *pf = lag->pf;
+ struct ice_hw *hw;
+ u16 swid;
- /* check to see if this event is for this netdev
- * check that we are in an aggregate
- */
- if (netdev != lag->netdev || !lag->bonded)
+ hw = &lag->pf->hw;
+ swid = hw->port_info->sw_id;
+
+ if (ice_share_res(hw, ICE_AQC_RES_TYPE_SWID, link, swid))
+ dev_warn(ice_pf_to_dev(lag->pf), "Failure to set primary interface shared status\n");
+}
+
+/**
+ * ice_lag_add_prune_list - Adds event_pf's VSI to primary's prune list
+ * @lag: lag info struct
+ * @event_pf: PF struct for VSI we are adding to primary's prune list
+ */
+static void ice_lag_add_prune_list(struct ice_lag *lag, struct ice_pf *event_pf)
+{
+ u16 num_vsi, rule_buf_sz, vsi_list_id, event_vsi_num, prim_vsi_idx;
+ struct ice_sw_rule_vsi_list *s_rule = NULL;
+ struct device *dev;
+
+ num_vsi = 1;
+
+ dev = ice_pf_to_dev(lag->pf);
+ event_vsi_num = event_pf->vsi[0]->vsi_num;
+ prim_vsi_idx = lag->pf->vsi[0]->idx;
+
+ if (!ice_find_vsi_list_entry(&lag->pf->hw, ICE_SW_LKUP_VLAN,
+ prim_vsi_idx, &vsi_list_id)) {
+ dev_warn(dev, "Could not locate prune list when setting up SRIOV LAG\n");
return;
+ }
- if (lag->upper_netdev) {
- dev_put(lag->upper_netdev);
- lag->upper_netdev = NULL;
- ice_set_rdma_cap(pf);
+ rule_buf_sz = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, num_vsi);
+ s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
+ if (!s_rule) {
+ dev_warn(dev, "Error allocating space for prune list when configuring SRIOV LAG\n");
+ return;
}
- /* perform some cleanup in case we come back */
- lag->bonded = false;
- lag->role = ICE_LAG_NONE;
+
+ s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_PRUNE_LIST_SET);
+ s_rule->index = cpu_to_le16(vsi_list_id);
+ s_rule->number_vsi = cpu_to_le16(num_vsi);
+ s_rule->vsi[0] = cpu_to_le16(event_vsi_num);
+
+ if (ice_aq_sw_rules(&event_pf->hw, s_rule, rule_buf_sz, 1,
+ ice_aqc_opc_update_sw_rules, NULL))
+ dev_warn(dev, "Error adding VSI prune list\n");
+ kfree(s_rule);
+}
+
+/**
+ * ice_lag_del_prune_list - Remove secondary's vsi from primary's prune list
+ * @lag: primary interface's ice_lag struct
+ * @event_pf: PF struct for unlinking interface
+ */
+static void ice_lag_del_prune_list(struct ice_lag *lag, struct ice_pf *event_pf)
+{
+ u16 num_vsi, vsi_num, vsi_idx, rule_buf_sz, vsi_list_id;
+ struct ice_sw_rule_vsi_list *s_rule = NULL;
+ struct device *dev;
+
+ num_vsi = 1;
+
+ dev = ice_pf_to_dev(lag->pf);
+ vsi_num = event_pf->vsi[0]->vsi_num;
+ vsi_idx = lag->pf->vsi[0]->idx;
+
+ if (!ice_find_vsi_list_entry(&lag->pf->hw, ICE_SW_LKUP_VLAN,
+ vsi_idx, &vsi_list_id)) {
+ dev_warn(dev, "Could not locate prune list when unwinding SRIOV LAG\n");
+ return;
+ }
+
+ rule_buf_sz = (u16)ICE_SW_RULE_VSI_LIST_SIZE(s_rule, num_vsi);
+ s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
+ if (!s_rule) {
+ dev_warn(dev, "Error allocating prune list when unwinding SRIOV LAG\n");
+ return;
+ }
+
+ s_rule->hdr.type = cpu_to_le16(ICE_AQC_SW_RULES_T_PRUNE_LIST_CLEAR);
+ s_rule->index = cpu_to_le16(vsi_list_id);
+ s_rule->number_vsi = cpu_to_le16(num_vsi);
+ s_rule->vsi[0] = cpu_to_le16(vsi_num);
+
+ if (ice_aq_sw_rules(&event_pf->hw, (struct ice_aqc_sw_rules *)s_rule,
+ rule_buf_sz, 1, ice_aqc_opc_update_sw_rules, NULL))
+ dev_warn(dev, "Error clearing VSI prune list\n");
+
+ kfree(s_rule);
+}
+
+/**
+ * ice_lag_init_feature_support_flag - Check for NVM support for LAG
+ * @pf: PF struct
+ */
+static void ice_lag_init_feature_support_flag(struct ice_pf *pf)
+{
+ struct ice_hw_common_caps *caps;
+
+ caps = &pf->hw.dev_caps.common_cap;
+ if (caps->roce_lag)
+ ice_set_feature_support(pf, ICE_F_ROCE_LAG);
+ else
+ ice_clear_feature_support(pf, ICE_F_ROCE_LAG);
+
+ if (caps->sriov_lag)
+ ice_set_feature_support(pf, ICE_F_SRIOV_LAG);
+ else
+ ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
}
/**
* ice_lag_changeupper_event - handle LAG changeupper event
* @lag: LAG info struct
* @ptr: opaque pointer data
- *
- * ptr is to be cast into netdev_notifier_changeupper_info
*/
static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
{
struct netdev_notifier_changeupper_info *info;
+ struct ice_lag *primary_lag;
struct net_device *netdev;
info = ptr;
@@ -244,44 +1246,437 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
if (netdev != lag->netdev)
return;
- if (!info->upper_dev) {
- netdev_dbg(netdev, "changeupper rcvd, but no upper defined\n");
+ primary_lag = ice_lag_find_primary(lag);
+ if (info->linking) {
+ lag->upper_netdev = info->upper_dev;
+ /* If there is not already a primary interface in the LAG,
+ * then mark this one as primary.
+ */
+ if (!primary_lag) {
+ lag->primary = true;
+ /* Configure primary's SWID to be shared */
+ ice_lag_primary_swid(lag, true);
+ primary_lag = lag;
+ } else {
+ u16 swid;
+
+ swid = primary_lag->pf->hw.port_info->sw_id;
+ ice_lag_set_swid(swid, lag, true);
+ ice_lag_add_prune_list(primary_lag, lag->pf);
+ ice_lag_cfg_drop_fltr(lag, true);
+ }
+ /* add filter for primary control packets */
+ ice_lag_cfg_cp_fltr(lag, true);
+ } else {
+ if (!primary_lag && lag->primary)
+ primary_lag = lag;
+
+ if (!lag->primary) {
+ ice_lag_set_swid(0, lag, false);
+ } else {
+ if (primary_lag && lag->primary) {
+ ice_lag_primary_swid(lag, false);
+ ice_lag_del_prune_list(primary_lag, lag->pf);
+ }
+ }
+ /* remove filter for control packets */
+ ice_lag_cfg_cp_fltr(lag, false);
+ }
+}
+
+/**
+ * ice_lag_monitor_link - monitor interfaces entering/leaving the aggregate
+ * @lag: lag info struct
+ * @ptr: opaque data containing notifier event
+ *
+ * This function only operates after a primary has been set.
+ */
+static void ice_lag_monitor_link(struct ice_lag *lag, void *ptr)
+{
+ struct netdev_notifier_changeupper_info *info;
+ struct ice_hw *prim_hw, *active_hw;
+ struct net_device *event_netdev;
+ struct ice_pf *pf;
+ u8 prim_port;
+
+ if (!lag->primary)
return;
+
+ event_netdev = netdev_notifier_info_to_dev(ptr);
+ if (!netif_is_same_ice(lag->pf, event_netdev))
+ return;
+
+ pf = lag->pf;
+ prim_hw = &pf->hw;
+ prim_port = prim_hw->port_info->lport;
+
+ info = (struct netdev_notifier_changeupper_info *)ptr;
+ if (info->upper_dev != lag->upper_netdev)
+ return;
+
+ if (!info->linking) {
+ /* Since only two interfaces are allowed in SRIOV+LAG, if one
+ * port is leaving, then the nodes need to be on the primary
+ * interface.
+ */
+ if (prim_port != lag->active_port &&
+ lag->active_port != ICE_LAG_INVALID_PORT) {
+ active_hw = ice_lag_find_hw_by_lport(lag,
+ lag->active_port);
+ ice_lag_reclaim_vf_nodes(lag, active_hw);
+ lag->active_port = ICE_LAG_INVALID_PORT;
+ }
}
+}
+
+/**
+ * ice_lag_monitor_active - primary PF keeps track of which port is active
+ * @lag: lag info struct
+ * @ptr: opaque data containing notifier event
+ *
+ * This function is for the primary PF to monitor changes in which port is
+ * active and handle changes for SRIOV VF functionality
+ */
+static void ice_lag_monitor_active(struct ice_lag *lag, void *ptr)
+{
+ struct net_device *event_netdev, *event_upper;
+ struct netdev_notifier_bonding_info *info;
+ struct netdev_bonding_info *bonding_info;
+ struct ice_netdev_priv *event_np;
+ struct ice_pf *pf, *event_pf;
+ u8 prim_port, event_port;
+
+ if (!lag->primary)
+ return;
- netdev_dbg(netdev, "bonding %s\n", info->linking ? "LINK" : "UNLINK");
+ pf = lag->pf;
+ if (!pf)
+ return;
- if (!netif_is_lag_master(info->upper_dev)) {
- netdev_dbg(netdev, "changeupper rcvd, but not primary. bail\n");
+ event_netdev = netdev_notifier_info_to_dev(ptr);
+ rcu_read_lock();
+ event_upper = netdev_master_upper_dev_get_rcu(event_netdev);
+ rcu_read_unlock();
+ if (!netif_is_ice(event_netdev) || event_upper != lag->upper_netdev)
return;
+
+ event_np = netdev_priv(event_netdev);
+ event_pf = event_np->vsi->back;
+ event_port = event_pf->hw.port_info->lport;
+ prim_port = pf->hw.port_info->lport;
+
+ info = (struct netdev_notifier_bonding_info *)ptr;
+ bonding_info = &info->bonding_info;
+
+ if (!bonding_info->slave.state) {
+ /* if no port is currently active, then nodes and filters exist
+ * on primary port, check if we need to move them
+ */
+ if (lag->active_port == ICE_LAG_INVALID_PORT) {
+ if (event_port != prim_port)
+ ice_lag_move_vf_nodes(lag, prim_port,
+ event_port);
+ lag->active_port = event_port;
+ return;
+ }
+
+ /* active port is already set and is current event port */
+ if (lag->active_port == event_port)
+ return;
+ /* new active port */
+ ice_lag_move_vf_nodes(lag, lag->active_port, event_port);
+ lag->active_port = event_port;
+ } else {
+ /* port not set as currently active (e.g. new active port
+ * has already claimed the nodes and filters)
+ */
+ if (lag->active_port != event_port)
+ return;
+ /* Neither port is active (link down on the bond) - set the
+ * active port to invalid and move nodes and filters back to
+ * the primary if they are not already there
+ */
+ if (event_port != prim_port)
+ ice_lag_move_vf_nodes(lag, event_port, prim_port);
+ lag->active_port = ICE_LAG_INVALID_PORT;
}
+}
- if (info->linking)
- ice_lag_link(lag, info);
- else
- ice_lag_unlink(lag, info);
+/**
+ * ice_lag_chk_comp - evaluate bonded interface for feature support
+ * @lag: lag info struct
+ * @ptr: opaque data for netdev event info
+ */
+static bool
+ice_lag_chk_comp(struct ice_lag *lag, void *ptr)
+{
+ struct net_device *event_netdev, *event_upper;
+ struct netdev_notifier_bonding_info *info;
+ struct netdev_bonding_info *bonding_info;
+ struct list_head *tmp;
+ struct device *dev;
+ int count = 0;
- ice_display_lag_info(lag);
+ if (!lag->primary)
+ return true;
+
+ event_netdev = netdev_notifier_info_to_dev(ptr);
+ rcu_read_lock();
+ event_upper = netdev_master_upper_dev_get_rcu(event_netdev);
+ rcu_read_unlock();
+ if (event_upper != lag->upper_netdev)
+ return true;
+
+ dev = ice_pf_to_dev(lag->pf);
+
+ /* only supporting switchdev mode for SRIOV VF LAG.
+ * primary interface has to be in switchdev mode
+ */
+ if (!ice_is_switchdev_running(lag->pf)) {
+ dev_info(dev, "Primary interface not in switchdev mode - VF LAG disabled\n");
+ return false;
+ }
+
+ info = (struct netdev_notifier_bonding_info *)ptr;
+ bonding_info = &info->bonding_info;
+ lag->bond_mode = bonding_info->master.bond_mode;
+ if (lag->bond_mode != BOND_MODE_ACTIVEBACKUP) {
+ dev_info(dev, "Bond Mode not ACTIVE-BACKUP - VF LAG disabled\n");
+ return false;
+ }
+
+ list_for_each(tmp, lag->netdev_head) {
+ struct ice_dcbx_cfg *dcb_cfg, *peer_dcb_cfg;
+ struct ice_lag_netdev_list *entry;
+ struct ice_netdev_priv *peer_np;
+ struct net_device *peer_netdev;
+ struct ice_vsi *vsi, *peer_vsi;
+ struct ice_pf *peer_pf;
+
+ entry = list_entry(tmp, struct ice_lag_netdev_list, node);
+ peer_netdev = entry->netdev;
+ if (!netif_is_ice(peer_netdev)) {
+ dev_info(dev, "Found %s non-ice netdev in LAG - VF LAG disabled\n",
+ netdev_name(peer_netdev));
+ return false;
+ }
+
+ count++;
+ if (count > 2) {
+ dev_info(dev, "Found more than two netdevs in LAG - VF LAG disabled\n");
+ return false;
+ }
+
+ peer_np = netdev_priv(peer_netdev);
+ vsi = ice_get_main_vsi(lag->pf);
+ peer_vsi = peer_np->vsi;
+ if (lag->pf->pdev->bus != peer_vsi->back->pdev->bus ||
+ lag->pf->pdev->slot != peer_vsi->back->pdev->slot) {
+ dev_info(dev, "Found %s on different device in LAG - VF LAG disabled\n",
+ netdev_name(peer_netdev));
+ return false;
+ }
+
+ dcb_cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg;
+ peer_dcb_cfg = &peer_vsi->port_info->qos_cfg.local_dcbx_cfg;
+ if (memcmp(dcb_cfg, peer_dcb_cfg,
+ sizeof(struct ice_dcbx_cfg))) {
+ dev_info(dev, "Found %s with different DCB in LAG - VF LAG disabled\n",
+ netdev_name(peer_netdev));
+ return false;
+ }
+
+ peer_pf = peer_vsi->back;
+ if (test_bit(ICE_FLAG_FW_LLDP_AGENT, peer_pf->flags)) {
+ dev_warn(dev, "Found %s with FW LLDP agent active - VF LAG disabled\n",
+ netdev_name(peer_netdev));
+ return false;
+ }
+ }
+
+ return true;
}
/**
- * ice_lag_changelower_event - handle LAG changelower event
+ * ice_lag_unregister - handle netdev unregister events
* @lag: LAG info struct
- * @ptr: opaque data pointer
+ * @event_netdev: netdev struct for target of notifier event
+ */
+static void
+ice_lag_unregister(struct ice_lag *lag, struct net_device *event_netdev)
+{
+ struct ice_netdev_priv *np;
+ struct ice_pf *event_pf;
+ struct ice_lag *p_lag;
+
+ p_lag = ice_lag_find_primary(lag);
+ np = netdev_priv(event_netdev);
+ event_pf = np->vsi->back;
+
+ if (p_lag) {
+ if (p_lag->active_port != p_lag->pf->hw.port_info->lport &&
+ p_lag->active_port != ICE_LAG_INVALID_PORT) {
+ struct ice_hw *active_hw;
+
+ active_hw = ice_lag_find_hw_by_lport(lag,
+ p_lag->active_port);
+ if (active_hw)
+ ice_lag_reclaim_vf_nodes(p_lag, active_hw);
+ lag->active_port = ICE_LAG_INVALID_PORT;
+ }
+ }
+
+ /* primary processing for primary */
+ if (lag->primary && lag->netdev == event_netdev)
+ ice_lag_primary_swid(lag, false);
+
+ /* primary processing for secondary */
+ if (lag->primary && lag->netdev != event_netdev)
+ ice_lag_del_prune_list(lag, event_pf);
+
+ /* secondary processing for secondary */
+ if (!lag->primary && lag->netdev == event_netdev)
+ ice_lag_set_swid(0, lag, false);
+}
+
+/**
+ * ice_lag_monitor_rdma - set and clear RDMA functionality
+ * @lag: pointer to lag struct
+ * @ptr: opaque data for netdev event info
+ */
+static void
+ice_lag_monitor_rdma(struct ice_lag *lag, void *ptr)
+{
+ struct netdev_notifier_changeupper_info *info;
+ struct net_device *netdev;
+
+ info = ptr;
+ netdev = netdev_notifier_info_to_dev(ptr);
+
+ if (netdev != lag->netdev)
+ return;
+
+ if (info->linking)
+ ice_clear_rdma_cap(lag->pf);
+ else
+ ice_set_rdma_cap(lag->pf);
+}
+
+/**
+ * ice_lag_chk_disabled_bond - monitor interfaces entering/leaving disabled bond
+ * @lag: lag info struct
+ * @ptr: opaque data containing event
*
- * ptr to be cast to netdev_notifier_changelowerstate_info
+ * As interfaces enter a bond, determine whether the bond is currently
+ * SRIOV LAG compliant and flag it if not. As interfaces leave the
+ * bond, reset their compliant status.
*/
-static void ice_lag_changelower_event(struct ice_lag *lag, void *ptr)
+static void ice_lag_chk_disabled_bond(struct ice_lag *lag, void *ptr)
{
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ struct ice_lag *prim_lag;
if (netdev != lag->netdev)
return;
- netdev_dbg(netdev, "bonding info\n");
+ if (info->linking) {
+ prim_lag = ice_lag_find_primary(lag);
+ if (prim_lag &&
+ !ice_is_feature_supported(prim_lag->pf, ICE_F_SRIOV_LAG)) {
+ ice_clear_feature_support(lag->pf, ICE_F_SRIOV_LAG);
+ netdev_info(netdev, "Interface added to non-compliant SRIOV LAG aggregate\n");
+ }
+ } else {
+ ice_lag_init_feature_support_flag(lag->pf);
+ }
+}
+
+/**
+ * ice_lag_disable_sriov_bond - set members of bond as not supporting SRIOV LAG
+ * @lag: primary interface's lag struct
+ */
+static void ice_lag_disable_sriov_bond(struct ice_lag *lag)
+{
+ struct ice_netdev_priv *np;
+ struct ice_pf *pf;
+
+ np = netdev_priv(lag->netdev);
+ pf = np->vsi->back;
+ ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
+}
+
+/**
+ * ice_lag_process_event - process a task assigned to the lag_wq
+ * @work: pointer to work_struct
+ */
+static void ice_lag_process_event(struct work_struct *work)
+{
+ struct netdev_notifier_changeupper_info *info;
+ struct ice_lag_work *lag_work;
+ struct net_device *netdev;
+ struct list_head *tmp, *n;
+ struct ice_pf *pf;
+
+ lag_work = container_of(work, struct ice_lag_work, lag_task);
+ pf = lag_work->lag->pf;
+
+ mutex_lock(&pf->lag_mutex);
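+ /* expose the member list snapshotted in the notifier so helpers that
+ * walk the bond can use it during event processing
+ */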
+ lag_work->lag->netdev_head = &lag_work->netdev_list.node;
+
+ switch (lag_work->event) {
+ case NETDEV_CHANGEUPPER:
+ info = &lag_work->info.changeupper_info;
+ ice_lag_chk_disabled_bond(lag_work->lag, info);
+ if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) {
+ ice_lag_monitor_link(lag_work->lag, info);
+ ice_lag_changeupper_event(lag_work->lag, info);
+ ice_lag_link_unlink(lag_work->lag, info);
+ }
+ ice_lag_monitor_rdma(lag_work->lag, info);
+ break;
+ case NETDEV_BONDING_INFO:
+ if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) {
+ if (!ice_lag_chk_comp(lag_work->lag,
+ &lag_work->info.bonding_info)) {
+ netdev = lag_work->info.bonding_info.info.dev;
+ ice_lag_disable_sriov_bond(lag_work->lag);
+ ice_lag_unregister(lag_work->lag, netdev);
+ goto lag_cleanup;
+ }
+ ice_lag_monitor_active(lag_work->lag,
+ &lag_work->info.bonding_info);
+ ice_lag_cfg_pf_fltrs(lag_work->lag,
+ &lag_work->info.bonding_info);
+ }
+ ice_lag_info_event(lag_work->lag, &lag_work->info.bonding_info);
+ break;
+ case NETDEV_UNREGISTER:
+ if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG)) {
+ netdev = lag_work->info.bonding_info.info.dev;
+ if ((netdev == lag_work->lag->netdev ||
+ lag_work->lag->primary) && lag_work->lag->bonded)
+ ice_lag_unregister(lag_work->lag, netdev);
+ }
+ break;
+ default:
+ break;
+ }
+
+lag_cleanup:
+ /* cleanup resources allocated for this work item */
+ list_for_each_safe(tmp, n, &lag_work->netdev_list.node) {
+ struct ice_lag_netdev_list *entry;
+
+ entry = list_entry(tmp, struct ice_lag_netdev_list, node);
+ list_del(&entry->node);
+ kfree(entry);
+ }
+ lag_work->lag->netdev_head = NULL;
+
+ mutex_unlock(&pf->lag_mutex);
- if (!netif_is_lag_port(netdev))
- netdev_dbg(netdev, "CHANGELOWER rcvd, but netdev not in LAG. Bail\n");
+ kfree(lag_work);
}
/**
@@ -295,34 +1690,79 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
void *ptr)
{
struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+ struct net_device *upper_netdev;
+ struct ice_lag_work *lag_work;
struct ice_lag *lag;
- lag = container_of(notif_blk, struct ice_lag, notif_block);
+ if (!netif_is_ice(netdev))
+ return NOTIFY_DONE;
+
+ if (event != NETDEV_CHANGEUPPER && event != NETDEV_BONDING_INFO &&
+ event != NETDEV_UNREGISTER)
+ return NOTIFY_DONE;
+
+ if (!(netdev->priv_flags & IFF_BONDING))
+ return NOTIFY_DONE;
+ lag = container_of(notif_blk, struct ice_lag, notif_block);
if (!lag->netdev)
return NOTIFY_DONE;
- /* Check that the netdev is in the working namespace */
if (!net_eq(dev_net(netdev), &init_net))
return NOTIFY_DONE;
+ /* This memory will be freed at the end of ice_lag_process_event */
+ lag_work = kzalloc(sizeof(*lag_work), GFP_KERNEL);
+ if (!lag_work)
+ return -ENOMEM;
+
+ lag_work->event_netdev = netdev;
+ lag_work->lag = lag;
+ lag_work->event = event;
+ if (event == NETDEV_CHANGEUPPER) {
+ struct netdev_notifier_changeupper_info *info;
+
+ info = ptr;
+ upper_netdev = info->upper_dev;
+ } else {
+ upper_netdev = netdev_master_upper_dev_get(netdev);
+ }
+
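+ /* snapshot the bond's current member list; the work handler runs
+ * asynchronously and needs a stable view of the members
+ */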
+ INIT_LIST_HEAD(&lag_work->netdev_list.node);
+ if (upper_netdev) {
+ struct ice_lag_netdev_list *nd_list;
+ struct net_device *tmp_nd;
+
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(upper_netdev, tmp_nd) {
+ nd_list = kzalloc(sizeof(*nd_list), GFP_ATOMIC);
+ if (!nd_list)
+ break;
+
+ nd_list->netdev = tmp_nd;
+ list_add(&nd_list->node, &lag_work->netdev_list.node);
+ }
+ rcu_read_unlock();
+ }
+
switch (event) {
case NETDEV_CHANGEUPPER:
- ice_lag_changeupper_event(lag, ptr);
- break;
- case NETDEV_CHANGELOWERSTATE:
- ice_lag_changelower_event(lag, ptr);
+ lag_work->info.changeupper_info =
+ *((struct netdev_notifier_changeupper_info *)ptr);
break;
case NETDEV_BONDING_INFO:
- ice_lag_info_event(lag, ptr);
- break;
- case NETDEV_UNREGISTER:
- ice_lag_unregister(lag, netdev);
+ lag_work->info.bonding_info =
+ *((struct netdev_notifier_bonding_info *)ptr);
break;
default:
+ lag_work->info.notifier_info =
+ *((struct netdev_notifier_info *)ptr);
break;
}
+ INIT_WORK(&lag_work->lag_task, ice_lag_process_event);
+ queue_work(ice_lag_wq, &lag_work->lag_task);
+
return NOTIFY_DONE;
}
@@ -366,6 +1806,166 @@ static void ice_unregister_lag_handler(struct ice_lag *lag)
}
/**
+ * ice_create_lag_recipe
+ * @hw: pointer to HW struct
+ * @rid: pointer to u16 to pass back recipe index
+ * @base_recipe: recipe to base the new recipe on
+ * @prio: priority for new recipe
+ *
+ * Returns 0 on success, negative error code on failure
+ */
+static int ice_create_lag_recipe(struct ice_hw *hw, u16 *rid,
+ const u8 *base_recipe, u8 prio)
+{
+ struct ice_aqc_recipe_data_elem *new_rcp;
+ int err;
+
+ err = ice_alloc_recipe(hw, rid);
+ if (err)
+ return err;
+
+ new_rcp = kzalloc(ICE_RECIPE_LEN * ICE_MAX_NUM_RECIPES, GFP_KERNEL);
+ if (!new_rcp)
+ return -ENOMEM;
+
+ memcpy(new_rcp, base_recipe, ICE_RECIPE_LEN);
+ new_rcp->content.act_ctrl_fwd_priority = prio;
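+ /* mark the recipe as a root recipe whose bitmap references only itself */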
+ new_rcp->content.rid = *rid | ICE_AQ_RECIPE_ID_IS_ROOT;
+ new_rcp->recipe_indx = *rid;
+ bitmap_zero((unsigned long *)new_rcp->recipe_bitmap,
+ ICE_MAX_NUM_RECIPES);
+ set_bit(*rid, (unsigned long *)new_rcp->recipe_bitmap);
+
+ err = ice_aq_add_recipe(hw, new_rcp, 1, NULL);
+ if (err)
+ *rid = 0;
+
+ kfree(new_rcp);
+ return err;
+}
+
+/**
+ * ice_lag_move_vf_nodes_tc_sync - move a VF's nodes for a tc during reset
+ * @lag: primary interface's lag struct
+ * @dest_hw: HW struct for destination's interface
+ * @vsi_num: VSI index in PF space
+ * @tc: traffic class to move
+ */
+static void
+ice_lag_move_vf_nodes_tc_sync(struct ice_lag *lag, struct ice_hw *dest_hw,
+ u16 vsi_num, u8 tc)
+{
+ DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+ struct device *dev = ice_pf_to_dev(lag->pf);
+ u16 numq, valq, num_moved, qbuf_size;
+ u16 buf_size = __struct_size(buf);
+ struct ice_aqc_cfg_txqs_buf *qbuf;
+ struct ice_sched_node *n_prt;
+ __le32 teid, parent_teid;
+ struct ice_vsi_ctx *ctx;
+ struct ice_hw *hw;
+ u32 tmp_teid;
+
+ hw = &lag->pf->hw;
+ ctx = ice_get_vsi_ctx(hw, vsi_num);
+ if (!ctx) {
+ dev_warn(dev, "LAG rebuild failed after reset due to VSI Context failure\n");
+ return;
+ }
+
+ if (!ctx->sched.vsi_node[tc])
+ return;
+
+ numq = ctx->num_lan_q_entries[tc];
+ teid = ctx->sched.vsi_node[tc]->info.node_teid;
+ tmp_teid = le32_to_cpu(teid);
+ parent_teid = ctx->sched.vsi_node[tc]->info.parent_teid;
+
+ if (!tmp_teid || !numq)
+ return;
+
+ if (ice_sched_suspend_resume_elems(hw, 1, &tmp_teid, true))
+ dev_dbg(dev, "Problem suspending traffic during reset rebuild\n");
+
+ /* reconfig queues for new port */
+ qbuf_size = struct_size(qbuf, queue_info, numq);
+ qbuf = kzalloc(qbuf_size, GFP_KERNEL);
+ if (!qbuf) {
+ dev_warn(dev, "Failure allocating VF queue recfg buffer for reset rebuild\n");
+ goto resume_sync;
+ }
+
+ /* add the per queue info for the reconfigure command buffer */
+ valq = ice_lag_qbuf_recfg(hw, qbuf, vsi_num, numq, tc);
+ if (!valq) {
+ dev_warn(dev, "Failure to reconfig queues for LAG reset rebuild\n");
+ goto sync_none;
+ }
+
+ if (ice_aq_cfg_lan_txq(hw, qbuf, qbuf_size, numq, hw->port_info->lport,
+ dest_hw->port_info->lport, NULL)) {
+ dev_warn(dev, "Failure to configure queues for LAG reset rebuild\n");
+ goto sync_qerr;
+ }
+
+sync_none:
+ kfree(qbuf);
+
+ /* find parent in destination tree */
+ n_prt = ice_lag_get_sched_parent(dest_hw, tc);
+ if (!n_prt)
+ goto resume_sync;
+
+ /* Move node to new parent */
+ buf->hdr.src_parent_teid = parent_teid;
+ buf->hdr.dest_parent_teid = n_prt->info.node_teid;
+ buf->hdr.num_elems = cpu_to_le16(1);
+ buf->hdr.mode = ICE_AQC_MOVE_ELEM_MODE_KEEP_OWN;
+ buf->teid[0] = teid;
+
+ if (ice_aq_move_sched_elems(&lag->pf->hw, buf, buf_size, &num_moved))
+ dev_warn(dev, "Failure to move VF nodes for LAG reset rebuild\n");
+ else
+ ice_sched_update_parent(n_prt, ctx->sched.vsi_node[tc]);
+
+ goto resume_sync;
+
+sync_qerr:
+ kfree(qbuf);
+
+resume_sync:
+ if (ice_sched_suspend_resume_elems(hw, 1, &tmp_teid, false))
+ dev_warn(dev, "Problem restarting traffic for LAG node reset rebuild\n");
+}
+
+/**
+ * ice_lag_move_vf_nodes_sync - move vf nodes to active interface
+ * @lag: primary interfaces lag struct
+ * @dest_hw: HW struct for the currently active port's interface
+ *
+ * This function is used in a reset context, outside of event handling,
+ * to move the VF nodes to the secondary interface when that interface
+ * is the active interface during a reset rebuild.
+ */
+static void
+ice_lag_move_vf_nodes_sync(struct ice_lag *lag, struct ice_hw *dest_hw)
+{
+ struct ice_pf *pf;
+ int i, tc;
+
+ if (!lag->primary || !dest_hw)
+ return;
+
+ pf = lag->pf;
+ ice_for_each_vsi(pf, i)
+ if (pf->vsi[i] && (pf->vsi[i]->type == ICE_VSI_VF ||
+ pf->vsi[i]->type == ICE_VSI_SWITCHDEV_CTRL))
+ ice_for_each_traffic_class(tc)
+ ice_lag_move_vf_nodes_tc_sync(lag, dest_hw, i,
+ tc);
+}
+
+/**
* ice_init_lag - initialize support for LAG
* @pf: PF struct
*
@@ -377,7 +1977,12 @@ int ice_init_lag(struct ice_pf *pf)
struct device *dev = ice_pf_to_dev(pf);
struct ice_lag *lag;
struct ice_vsi *vsi;
- int err;
+ u64 recipe_bits = 0;
+ int n, err;
+
+ ice_lag_init_feature_support_flag(pf);
+ if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
+ return 0;
pf->lag = kzalloc(sizeof(*lag), GFP_KERNEL);
if (!pf->lag)
@@ -394,8 +1999,8 @@ int ice_init_lag(struct ice_pf *pf)
lag->pf = pf;
lag->netdev = vsi->netdev;
lag->role = ICE_LAG_NONE;
+ lag->active_port = ICE_LAG_INVALID_PORT;
lag->bonded = false;
- lag->peer_netdev = NULL;
lag->upper_netdev = NULL;
lag->notif_block.notifier_call = NULL;
@@ -405,11 +2010,39 @@ int ice_init_lag(struct ice_pf *pf)
goto lag_error;
}
+ err = ice_create_lag_recipe(&pf->hw, &lag->pf_recipe,
+ ice_dflt_vsi_rcp, 1);
+ if (err)
+ goto lag_error;
+
+ err = ice_create_lag_recipe(&pf->hw, &lag->lport_recipe,
+ ice_lport_rcp, 3);
+ if (err)
+ goto free_rcp_res;
+
+ /* associate recipes to profiles */
+ for (n = 0; n < ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER; n++) {
+ err = ice_aq_get_recipe_to_profile(&pf->hw, n,
+ (u8 *)&recipe_bits, NULL);
+ if (err)
+ continue;
+
+ if (recipe_bits & BIT(ICE_SW_LKUP_DFLT)) {
+ recipe_bits |= BIT(lag->pf_recipe) |
+ BIT(lag->lport_recipe);
+ ice_aq_map_recipe_to_profile(&pf->hw, n,
+ (u8 *)&recipe_bits, NULL);
+ }
+ }
+
ice_display_lag_info(lag);
dev_dbg(dev, "INIT LAG complete\n");
return 0;
+free_rcp_res:
+ ice_free_hw_res(&pf->hw, ICE_AQC_RES_TYPE_RECIPE, 1,
+ &pf->lag->pf_recipe);
lag_error:
kfree(lag);
pf->lag = NULL;
@@ -435,11 +2068,107 @@ void ice_deinit_lag(struct ice_pf *pf)
if (lag->pf)
ice_unregister_lag_handler(lag);
- dev_put(lag->upper_netdev);
+ flush_workqueue(ice_lag_wq);
- dev_put(lag->peer_netdev);
+ ice_free_hw_res(&pf->hw, ICE_AQC_RES_TYPE_RECIPE, 1,
+ &pf->lag->pf_recipe);
+ ice_free_hw_res(&pf->hw, ICE_AQC_RES_TYPE_RECIPE, 1,
+ &pf->lag->lport_recipe);
kfree(lag);
pf->lag = NULL;
}
+
+/**
+ * ice_lag_rebuild - rebuild lag resources after reset
+ * @pf: pointer to local pf struct
+ *
+ * PF resets are promoted to CORER resets when the interface is in an
+ * aggregate, which means the PF resources for the interface must be
+ * rebuilt. Since this happens outside the normal event processing, we
+ * need to acquire the LAG lock.
+ *
+ * This function will also evaluate the VF resources if this is the primary
+ * interface.
+ */
+void ice_lag_rebuild(struct ice_pf *pf)
+{
+ struct ice_lag_netdev_list ndlist;
+ struct ice_lag *lag, *prim_lag;
+ u8 act_port, loc_port;
+
+ if (!pf->lag || !pf->lag->bonded)
+ return;
+
+ mutex_lock(&pf->lag_mutex);
+
+ lag = pf->lag;
+ if (lag->primary) {
+ prim_lag = lag;
+ } else {
+ ice_lag_build_netdev_list(lag, &ndlist);
+ prim_lag = ice_lag_find_primary(lag);
+ }
+
+ if (!prim_lag) {
+ dev_dbg(ice_pf_to_dev(pf), "No primary interface in aggregate, can't rebuild\n");
+ goto lag_rebuild_out;
+ }
+
+ act_port = prim_lag->active_port;
+ loc_port = lag->pf->hw.port_info->lport;
+
+ /* configure SWID for this port */
+ if (lag->primary) {
+ ice_lag_primary_swid(lag, true);
+ } else {
+ ice_lag_set_swid(prim_lag->pf->hw.port_info->sw_id, lag, true);
+ ice_lag_add_prune_list(prim_lag, pf);
+ if (act_port == loc_port)
+ ice_lag_move_vf_nodes_sync(prim_lag, &pf->hw);
+ }
+
+ ice_lag_cfg_cp_fltr(lag, true);
+
+ if (lag->pf_rule_id)
+ if (ice_lag_cfg_dflt_fltr(lag, true))
+ dev_err(ice_pf_to_dev(pf), "Error adding default VSI rule in rebuild\n");
+
+ ice_clear_rdma_cap(pf);
+lag_rebuild_out:
+ ice_lag_destroy_netdev_list(lag, &ndlist);
+ mutex_unlock(&pf->lag_mutex);
+}
+
+/**
+ * ice_lag_is_switchdev_running - check for switchdev in the aggregate
+ * @pf: pointer to PF structure
+ *
+ * Check if switchdev is running on any of the interfaces connected to
+ * the LAG.
+ */
+bool ice_lag_is_switchdev_running(struct ice_pf *pf)
+{
+ struct ice_lag *lag = pf->lag;
+ struct net_device *tmp_nd;
+
+ if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) || !lag)
+ return false;
+
+ rcu_read_lock();
+ for_each_netdev_in_bond_rcu(lag->upper_netdev, tmp_nd) {
+ struct ice_netdev_priv *priv = netdev_priv(tmp_nd);
+
+ if (!netif_is_ice(tmp_nd) || !priv || !priv->vsi ||
+ !priv->vsi->back)
+ continue;
+
+ if (ice_is_switchdev_running(priv->vsi->back)) {
+ rcu_read_unlock();
+ return true;
+ }
+ }
+ rcu_read_unlock();
+
+ return false;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
index 2c373676c42f..ede833dfa658 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.h
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -14,20 +14,56 @@ enum ice_lag_role {
ICE_LAG_UNSET
};
+#define ICE_LAG_INVALID_PORT 0xFF
+
+#define ICE_LAG_RESET_RETRIES 5
+
struct ice_pf;
+struct ice_vf;
+
+struct ice_lag_netdev_list {
+ struct list_head node;
+ struct net_device *netdev;
+};
/* LAG info struct */
struct ice_lag {
struct ice_pf *pf; /* backlink to PF struct */
struct net_device *netdev; /* this PF's netdev */
- struct net_device *peer_netdev;
struct net_device *upper_netdev; /* upper bonding netdev */
+ struct list_head *netdev_head;
struct notifier_block notif_block;
+ s32 bond_mode;
+ u16 bond_swid; /* swid for primary interface */
+ u8 active_port; /* lport value for the current active port */
u8 bonded:1; /* currently bonded */
u8 primary:1; /* this is primary */
+ u16 pf_recipe;
+ u16 lport_recipe;
+ u16 pf_rule_id;
+ u16 cp_rule_idx;
+ u16 lport_rule_idx;
u8 role;
};
+/* LAG workqueue struct */
+struct ice_lag_work {
+ struct work_struct lag_task;
+ struct ice_lag_netdev_list netdev_list;
+ struct ice_lag *lag;
+ unsigned long event;
+ struct net_device *event_netdev;
+ union {
+ struct netdev_notifier_changeupper_info changeupper_info;
+ struct netdev_notifier_bonding_info bonding_info;
+ struct netdev_notifier_info notifier_info;
+ } info;
+};
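As a minimal sketch of how this struct and the ordered ice_lag_wq workqueue (defined in ice_main.c) fit together, assuming a hypothetical notifier path; the function names below are illustrative and not part of this patch:

/* Defer a netdev event to the ordered LAG workqueue (sketch) */
static void ice_lag_example_task(struct work_struct *w)
{
	struct ice_lag_work *work =
		container_of(w, struct ice_lag_work, lag_task);

	/* ... act on work->event / work->event_netdev here ... */
	kfree(work);
}

static void ice_lag_example_defer(struct ice_lag *lag, unsigned long event,
				  struct net_device *netdev)
{
	struct ice_lag_work *work;

	/* netdev notifiers run under rtnl, so GFP_KERNEL is safe */
	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (!work)
		return;

	INIT_WORK(&work->lag_task, ice_lag_example_task);
	INIT_LIST_HEAD(&work->netdev_list.node);
	work->lag = lag;
	work->event = event;
	work->event_netdev = netdev;

	queue_work(ice_lag_wq, &work->lag_task);
}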
+
+void ice_lag_move_new_vf_nodes(struct ice_vf *vf);
int ice_init_lag(struct ice_pf *pf);
void ice_deinit_lag(struct ice_pf *pf);
+void ice_lag_rebuild(struct ice_pf *pf);
+bool ice_lag_is_switchdev_running(struct ice_pf *pf);
+void ice_lag_move_vf_nodes_cfg(struct ice_lag *lag, u8 src_prt, u8 dst_prt);
#endif /* _ICE_LAG_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 0054d7e64ec3..1bad6e17f9be 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -229,7 +229,7 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
* of queues vectors, subtract 1 (ICE_NONQ_VECS_VF) from the
* original vector count
*/
- vsi->num_q_vectors = pf->vfs.num_msix_per - ICE_NONQ_VECS_VF;
+ vsi->num_q_vectors = vf->num_msix - ICE_NONQ_VECS_VF;
break;
case ICE_VSI_CTRL:
vsi->alloc_txq = 1;
@@ -907,6 +907,7 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
{
struct ice_hw_common_caps *cap;
struct ice_pf *pf = vsi->back;
+ u16 max_rss_size;
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
vsi->rss_size = 1;
@@ -914,32 +915,31 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
}
cap = &pf->hw.func_caps.common_cap;
+ max_rss_size = BIT(cap->rss_table_entry_width);
switch (vsi->type) {
case ICE_VSI_CHNL:
case ICE_VSI_PF:
/* PF VSI will inherit RSS instance of PF */
vsi->rss_table_size = (u16)cap->rss_table_size;
if (vsi->type == ICE_VSI_CHNL)
- vsi->rss_size = min_t(u16, vsi->num_rxq,
- BIT(cap->rss_table_entry_width));
+ vsi->rss_size = min_t(u16, vsi->num_rxq, max_rss_size);
else
vsi->rss_size = min_t(u16, num_online_cpus(),
- BIT(cap->rss_table_entry_width));
- vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
+ max_rss_size);
+ vsi->rss_lut_type = ICE_LUT_PF;
break;
case ICE_VSI_SWITCHDEV_CTRL:
- vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
- vsi->rss_size = min_t(u16, num_online_cpus(),
- BIT(cap->rss_table_entry_width));
- vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
+ vsi->rss_table_size = ICE_LUT_VSI_SIZE;
+ vsi->rss_size = min_t(u16, num_online_cpus(), max_rss_size);
+ vsi->rss_lut_type = ICE_LUT_VSI;
break;
case ICE_VSI_VF:
/* VF VSI will get a small RSS table.
* For VSI_LUT, LUT size should be set to 64 bytes.
*/
- vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
+ vsi->rss_table_size = ICE_LUT_VSI_SIZE;
vsi->rss_size = ICE_MAX_RSS_QS_PER_VF;
- vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
+ vsi->rss_lut_type = ICE_LUT_VSI;
break;
case ICE_VSI_LB:
break;
@@ -1201,8 +1201,7 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
ctxt->info.q_opt_rss = ((lut_type << ICE_AQ_VSI_Q_OPT_RSS_LUT_S) &
ICE_AQ_VSI_Q_OPT_RSS_LUT_M) |
- ((hash_type << ICE_AQ_VSI_Q_OPT_RSS_HASH_S) &
- ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
+ (hash_type & ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
}
static void
@@ -1228,6 +1227,17 @@ ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
}
/**
+ * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not
+ * @vsi: VSI to check whether or not VLAN pruning is enabled.
+ *
+ * returns true if Rx VLAN pruning is enabled and false otherwise.
+ */
+static bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
+{
+ return vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+}
+
+/**
* ice_vsi_init - Create and initialize a VSI
* @vsi: the VSI being configured
* @vsi_flags: VSI configuration flags
@@ -1685,6 +1695,27 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
}
/**
+ * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
+ * @vsi: VSI
+ */
+static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
+{
+ if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
+ vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
+ vsi->rx_buf_len = ICE_RXBUF_1664;
+#if (PAGE_SIZE < 8192)
+ } else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
+ (vsi->netdev->mtu <= ETH_DATA_LEN)) {
+ vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
+ vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
+#endif
+ } else {
+ vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
+ vsi->rx_buf_len = ICE_RXBUF_3072;
+ }
+}
+
+/**
* ice_pf_state_is_nominal - checks the PF for nominal state
* @pf: pointer to PF to check
*
@@ -1759,27 +1790,6 @@ void ice_update_eth_stats(struct ice_vsi *vsi)
}
/**
- * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
- * @vsi: VSI
- */
-void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
-{
- if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
- vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
- vsi->rx_buf_len = ICE_RXBUF_1664;
-#if (PAGE_SIZE < 8192)
- } else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
- (vsi->netdev->mtu <= ETH_DATA_LEN)) {
- vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
- vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
-#endif
- } else {
- vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
- vsi->rx_buf_len = ICE_RXBUF_3072;
- }
-}
-
-/**
* ice_write_qrxflxp_cntxt - write/configure QRXFLXP_CNTXT register
* @hw: HW pointer
* @pf_q: index of the Rx queue in the PF's queue space
@@ -1821,21 +1831,14 @@ int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx)
{
- struct ice_aqc_add_tx_qgrp *qg_buf;
- int err;
+ DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
if (q_idx >= vsi->alloc_txq || !tx_rings || !tx_rings[q_idx])
return -EINVAL;
- qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL);
- if (!qg_buf)
- return -ENOMEM;
-
qg_buf->num_txqs = 1;
- err = ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
- kfree(qg_buf);
- return err;
+ return ice_vsi_cfg_txq(vsi, tx_rings[q_idx], qg_buf);
}
/**
@@ -1877,24 +1880,18 @@ setup_rings:
static int
ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
{
- struct ice_aqc_add_tx_qgrp *qg_buf;
- u16 q_idx = 0;
+ DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
int err = 0;
-
- qg_buf = kzalloc(struct_size(qg_buf, txqs, 1), GFP_KERNEL);
- if (!qg_buf)
- return -ENOMEM;
+ u16 q_idx;
qg_buf->num_txqs = 1;
for (q_idx = 0; q_idx < count; q_idx++) {
err = ice_vsi_cfg_txq(vsi, rings[q_idx], qg_buf);
if (err)
- goto err_cfg_txqs;
+ break;
}
-err_cfg_txqs:
- kfree(qg_buf);
return err;
}
@@ -2185,20 +2182,6 @@ bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi)
return false;
}
-/**
- * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not
- * @vsi: VSI to check whether or not VLAN pruning is enabled.
- *
- * returns true if Rx VLAN pruning is enabled and false otherwise.
- */
-bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
-{
- if (!vsi)
- return false;
-
- return (vsi->info.sw_flags2 & ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA);
-}
-
static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
{
if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
@@ -2388,6 +2371,9 @@ static int ice_vsi_cfg_tc_lan(struct ice_pf *pf, struct ice_vsi *vsi)
} else {
max_txqs[i] = vsi->alloc_txq;
}
+
+ if (vsi->type == ICE_VSI_PF)
+ max_txqs[i] += vsi->num_xdp_txq;
}
dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
@@ -2637,10 +2623,6 @@ void ice_vsi_decfg(struct ice_vsi *vsi)
if (vsi->type == ICE_VSI_VF &&
vsi->agg_node && vsi->agg_node->valid)
vsi->agg_node->num_vsis--;
- if (vsi->agg_node) {
- vsi->agg_node->valid = false;
- vsi->agg_node->agg_id = 0;
- }
}
/**
@@ -2944,21 +2926,6 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
}
/**
- * ice_napi_del - Remove NAPI handler for the VSI
- * @vsi: VSI for which NAPI handler is to be removed
- */
-void ice_napi_del(struct ice_vsi *vsi)
-{
- int v_idx;
-
- if (!vsi->netdev)
- return;
-
- ice_for_each_q_vector(vsi, v_idx)
- netif_napi_del(&vsi->q_vectors[v_idx]->napi);
-}
-
-/**
* ice_vsi_release - Delete a VSI and free its resources
* @vsi: the VSI being removed
*
@@ -3593,6 +3560,12 @@ int ice_set_dflt_vsi(struct ice_vsi *vsi)
dev = ice_pf_to_dev(vsi->back);
+ if (ice_lag_is_switchdev_running(vsi->back)) {
+ dev_dbg(dev, "VSI %d passed is a part of LAG containing interfaces in switchdev mode, nothing to do\n",
+ vsi->vsi_num);
+ return 0;
+ }
+
/* the VSI passed in is already the default VSI */
if (ice_is_vsi_dflt_vsi(vsi)) {
dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n",
@@ -3970,7 +3943,7 @@ bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f)
* @pf: pointer to the struct ice_pf instance
* @f: feature enum to set
*/
-static void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f)
+void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f)
{
if (f < 0 || f >= ICE_F_MAX)
return;
@@ -4003,13 +3976,21 @@ void ice_init_feature_support(struct ice_pf *pf)
case ICE_DEV_ID_E810C_BACKPLANE:
case ICE_DEV_ID_E810C_QSFP:
case ICE_DEV_ID_E810C_SFP:
+ case ICE_DEV_ID_E810_XXV_BACKPLANE:
+ case ICE_DEV_ID_E810_XXV_QSFP:
+ case ICE_DEV_ID_E810_XXV_SFP:
ice_set_feature_support(pf, ICE_F_DSCP);
- ice_set_feature_support(pf, ICE_F_PTP_EXTTS);
- if (ice_is_e810t(&pf->hw)) {
+ if (ice_is_phy_rclk_in_netlist(&pf->hw))
+ ice_set_feature_support(pf, ICE_F_PHY_RCLK);
+ /* If we don't own the timer, don't enable other caps */
+ if (!ice_pf_src_tmr_owned(pf))
+ break;
+ if (ice_is_cgu_in_netlist(&pf->hw))
+ ice_set_feature_support(pf, ICE_F_CGU);
+ if (ice_is_clock_mux_in_netlist(&pf->hw))
ice_set_feature_support(pf, ICE_F_SMA_CTRL);
- if (ice_gnss_is_gps_present(&pf->hw))
- ice_set_feature_support(pf, ICE_F_GNSS);
- }
+ if (ice_gnss_is_gps_present(&pf->hw))
+ ice_set_feature_support(pf, ICE_F_GNSS);
break;
default:
break;
@@ -4076,3 +4057,28 @@ void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx)
{
ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
}
+
+/**
+ * ice_vsi_update_local_lb - update sw block in VSI with local loopback bit
+ * @vsi: pointer to VSI structure
+ * @set: set or unset the bit
+ */
+int
+ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set)
+{
+ struct ice_vsi_ctx ctx = {
+ .info = vsi->info,
+ };
+
+ ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
+ if (set)
+ ctx.info.sw_flags |= ICE_AQ_VSI_SW_FLAG_LOCAL_LB;
+ else
+ ctx.info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_LOCAL_LB;
+
+ if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
+ return -ENODEV;
+
+ vsi->info = ctx.info;
+ return 0;
+}
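A brief caller-side sketch; the uplink_vsi variable and the loopback scenario are illustrative assumptions, not taken from this patch:

/* Hypothetical use: enable local loopback on an uplink VSI, e.g. so
 * locally bridged traffic can be looped back on the same port.
 */
err = ice_vsi_update_local_lb(uplink_vsi, true);
if (err)
	dev_err(ice_pf_to_dev(uplink_vsi->back),
		"failed to enable local loopback on VSI %d\n",
		uplink_vsi->vsi_num);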
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index e985766e6bb5..f24f5d1e6f9c 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -76,8 +76,6 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi);
int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
-bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi);
-
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
int ice_set_link(struct ice_vsi *vsi, bool ena);
@@ -93,8 +91,6 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_vsi_cfg_params *params);
-void ice_napi_del(struct ice_vsi *vsi);
-
int ice_vsi_release(struct ice_vsi *vsi);
void ice_vsi_close(struct ice_vsi *vsi);
@@ -130,7 +126,6 @@ void ice_update_tx_ring_stats(struct ice_tx_ring *ring, u64 pkts, u64 bytes);
void ice_update_rx_ring_stats(struct ice_rx_ring *ring, u64 pkts, u64 bytes);
-void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);
void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl);
void ice_write_itr(struct ice_ring_container *rc, u16 itr);
void ice_set_q_vector_intrl(struct ice_q_vector *q_vector);
@@ -157,11 +152,13 @@ void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx);
void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx);
void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx);
+int ice_vsi_update_local_lb(struct ice_vsi *vsi, bool set);
int ice_vsi_add_vlan_zero(struct ice_vsi *vsi);
int ice_vsi_del_vlan_zero(struct ice_vsi *vsi);
bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi);
u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi);
bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f);
+void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f);
void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f);
void ice_init_feature_support(struct ice_pf *pf);
bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index b40dfe6ae321..fb9c93f37e84 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -1,11 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (c) 2018, Intel Corporation. */
+/* Copyright (c) 2018-2023, Intel Corporation. */
/* Intel(R) Ethernet Connection E800 Series Linux Driver */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <generated/utsrelease.h>
+#include <linux/crash_dump.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_lib.h"
@@ -64,6 +65,7 @@ struct device *ice_hw_to_dev(struct ice_hw *hw)
}
static struct workqueue_struct *ice_wq;
+struct workqueue_struct *ice_lag_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;
@@ -80,7 +82,7 @@ ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
void *data,
void (*cleanup)(struct flow_block_cb *block_cb));
-bool netif_is_ice(struct net_device *dev)
+bool netif_is_ice(const struct net_device *dev)
{
return dev && (dev->netdev_ops == &ice_netdev_ops);
}
@@ -635,6 +637,11 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
+ if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) {
+ dev_dbg(dev, "PFR on a bonded interface, promoting to CORER\n");
+ reset_type = ICE_RESET_CORER;
+ }
+
ice_prepare_for_reset(pf, reset_type);
/* trigger the reset */
@@ -718,8 +725,13 @@ static void ice_reset_subtask(struct ice_pf *pf)
}
/* No pending resets to finish processing. Check for new resets */
- if (test_bit(ICE_PFR_REQ, pf->state))
+ if (test_bit(ICE_PFR_REQ, pf->state)) {
reset_type = ICE_RESET_PFR;
+ if (pf->lag && pf->lag->bonded) {
+ dev_dbg(ice_pf_to_dev(pf), "PFR on a bonded interface, promoting to CORER\n");
+ reset_type = ICE_RESET_CORER;
+ }
+ }
if (test_bit(ICE_CORER_REQ, pf->state))
reset_type = ICE_RESET_CORER;
if (test_bit(ICE_GLOBR_REQ, pf->state))
@@ -1239,64 +1251,63 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
return status;
}
-enum ice_aq_task_state {
- ICE_AQ_TASK_WAITING = 0,
- ICE_AQ_TASK_COMPLETE,
- ICE_AQ_TASK_CANCELED,
-};
-
-struct ice_aq_task {
- struct hlist_node entry;
+/**
+ * ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
+ * @pf: pointer to the PF private structure
+ * @task: intermediate helper storage and identifier for waiting
+ * @opcode: the opcode to wait for
+ *
+ * Prepares to wait for a specific AdminQ completion event on the ARQ for
+ * a given PF. The actual wait is performed by a subsequent call to
+ * ice_aq_wait_for_event().
+ *
+ * The calls are separated so that the caller can register for the event
+ * before sending the command, which closes the race between registering
+ * and the firmware responding.
+ *
+ * To obtain only the descriptor contents, leave task->event.msg_buf NULL.
+ * If the complete data buffer is desired, allocate task->event.msg_buf
+ * with enough space ahead of time.
+ */
+void ice_aq_prep_for_event(struct ice_pf *pf, struct ice_aq_task *task,
+ u16 opcode)
+{
+ INIT_HLIST_NODE(&task->entry);
+ task->opcode = opcode;
+ task->state = ICE_AQ_TASK_WAITING;
- u16 opcode;
- struct ice_rq_event_info *event;
- enum ice_aq_task_state state;
-};
+ spin_lock_bh(&pf->aq_wait_lock);
+ hlist_add_head(&task->entry, &pf->aq_wait_list);
+ spin_unlock_bh(&pf->aq_wait_lock);
+}
/**
* ice_aq_wait_for_event - Wait for an AdminQ event from firmware
* @pf: pointer to the PF private structure
- * @opcode: the opcode to wait for
+ * @task: wait task prepared by ice_aq_prep_for_event()
* @timeout: how long to wait, in jiffies
- * @event: storage for the event info
*
* Waits for a specific AdminQ completion event on the ARQ for a given PF. The
* current thread will be put to sleep until the specified event occurs or
* until the given timeout is reached.
*
- * To obtain only the descriptor contents, pass an event without an allocated
- * msg_buf. If the complete data buffer is desired, allocate the
- * event->msg_buf with enough space ahead of time.
- *
* Returns: zero on success, or a negative error code on failure.
*/
-int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
- struct ice_rq_event_info *event)
+int ice_aq_wait_for_event(struct ice_pf *pf, struct ice_aq_task *task,
+ unsigned long timeout)
{
+ enum ice_aq_task_state *state = &task->state;
struct device *dev = ice_pf_to_dev(pf);
- struct ice_aq_task *task;
- unsigned long start;
+ unsigned long start = jiffies;
long ret;
int err;
- task = kzalloc(sizeof(*task), GFP_KERNEL);
- if (!task)
- return -ENOMEM;
-
- INIT_HLIST_NODE(&task->entry);
- task->opcode = opcode;
- task->event = event;
- task->state = ICE_AQ_TASK_WAITING;
-
- spin_lock_bh(&pf->aq_wait_lock);
- hlist_add_head(&task->entry, &pf->aq_wait_list);
- spin_unlock_bh(&pf->aq_wait_lock);
-
- start = jiffies;
-
- ret = wait_event_interruptible_timeout(pf->aq_wait_queue, task->state,
+ ret = wait_event_interruptible_timeout(pf->aq_wait_queue,
+ *state != ICE_AQ_TASK_WAITING,
timeout);
- switch (task->state) {
+ switch (*state) {
+ case ICE_AQ_TASK_NOT_PREPARED:
+ WARN(1, "call to %s without ice_aq_prep_for_event()", __func__);
+ err = -EINVAL;
+ break;
case ICE_AQ_TASK_WAITING:
err = ret < 0 ? ret : -ETIMEDOUT;
break;
@@ -1307,7 +1318,7 @@ int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
err = ret < 0 ? ret : 0;
break;
default:
- WARN(1, "Unexpected AdminQ wait task state %u", task->state);
+ WARN(1, "Unexpected AdminQ wait task state %u", *state);
err = -EINVAL;
break;
}
@@ -1315,12 +1326,11 @@ int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
dev_dbg(dev, "Waited %u msecs (max %u msecs) for firmware response to op 0x%04x\n",
jiffies_to_msecs(jiffies - start),
jiffies_to_msecs(timeout),
- opcode);
+ task->opcode);
spin_lock_bh(&pf->aq_wait_lock);
hlist_del(&task->entry);
spin_unlock_bh(&pf->aq_wait_lock);
- kfree(task);
return err;
}
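A usage sketch of the split API; the send helper below is a placeholder for whatever AdminQ command the caller issues, and only ice_aq_prep_for_event() and ice_aq_wait_for_event() are from this patch:

static int ice_example_send_cmd(struct ice_pf *pf, u16 opcode)
{
	return 0;	/* placeholder for a real AdminQ send */
}

static int ice_example_issue_cmd(struct ice_pf *pf, u16 opcode)
{
	struct ice_aq_task task = {};
	int err, werr;

	/* Register for the completion before sending, so a fast firmware
	 * response cannot arrive in the window between send and wait.
	 */
	ice_aq_prep_for_event(pf, &task, opcode);

	err = ice_example_send_cmd(pf, opcode);

	/* ice_aq_wait_for_event() also unlinks the on-stack task from
	 * pf->aq_wait_list, so call it even if the send failed.
	 */
	werr = ice_aq_wait_for_event(pf, &task, msecs_to_jiffies(3000));

	return err ? err : werr;
}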
@@ -1346,23 +1356,26 @@ int ice_aq_wait_for_event(struct ice_pf *pf, u16 opcode, unsigned long timeout,
static void ice_aq_check_events(struct ice_pf *pf, u16 opcode,
struct ice_rq_event_info *event)
{
+ struct ice_rq_event_info *task_ev;
struct ice_aq_task *task;
bool found = false;
spin_lock_bh(&pf->aq_wait_lock);
hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
- if (task->state || task->opcode != opcode)
+ if (task->state != ICE_AQ_TASK_WAITING)
+ continue;
+ if (task->opcode != opcode)
continue;
- memcpy(&task->event->desc, &event->desc, sizeof(event->desc));
- task->event->msg_len = event->msg_len;
+ task_ev = &task->event;
+ memcpy(&task_ev->desc, &event->desc, sizeof(event->desc));
+ task_ev->msg_len = event->msg_len;
/* Only copy the data buffer if a destination was set */
- if (task->event->msg_buf &&
- task->event->buf_len > event->buf_len) {
- memcpy(task->event->msg_buf, event->msg_buf,
+ if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) {
+ memcpy(task_ev->msg_buf, event->msg_buf,
event->buf_len);
- task->event->buf_len = event->buf_len;
+ task_ev->buf_len = event->buf_len;
}
task->state = ICE_AQ_TASK_COMPLETE;
@@ -1746,7 +1759,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
wr32(hw, GL_MDET_TX_PQM, 0xffffffff);
}
- reg = rd32(hw, GL_MDET_TX_TCLAN);
+ reg = rd32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw));
if (reg & GL_MDET_TX_TCLAN_VALID_M) {
u8 pf_num = (reg & GL_MDET_TX_TCLAN_PF_NUM_M) >>
GL_MDET_TX_TCLAN_PF_NUM_S;
@@ -1760,7 +1773,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
- wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
+ wr32(hw, GL_MDET_TX_TCLAN_BY_MAC(hw), U32_MAX);
}
reg = rd32(hw, GL_MDET_RX);
@@ -1788,9 +1801,9 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
dev_info(dev, "Malicious Driver Detection event TX_PQM detected on PF\n");
}
- reg = rd32(hw, PF_MDET_TX_TCLAN);
+ reg = rd32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw));
if (reg & PF_MDET_TX_TCLAN_VALID_M) {
- wr32(hw, PF_MDET_TX_TCLAN, 0xFFFF);
+ wr32(hw, PF_MDET_TX_TCLAN_BY_MAC(hw), 0xffff);
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event TX_TCLAN detected on PF\n");
}
@@ -3137,7 +3150,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
if (oicr & PFINT_OICR_TSYN_TX_M) {
ena_mask &= ~PFINT_OICR_TSYN_TX_M;
- if (!hw->reset_ongoing)
+ if (!hw->reset_ongoing && ice_ptp_pf_handles_tx_interrupt(pf))
set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);
}
@@ -3147,7 +3160,7 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
ena_mask &= ~PFINT_OICR_TSYN_EVNT_M;
- if (hw->func_caps.ts_func_info.src_tmr_owned) {
+ if (ice_pf_src_tmr_owned(pf)) {
/* Save EVENTs from GLTSYN register */
pf->ptp.ext_ts_irq |= gltsyn_stat &
(GLTSYN_STAT_EVENT0_M |
@@ -3392,6 +3405,7 @@ static void ice_set_ops(struct ice_vsi *vsi)
netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
NETDEV_XDP_ACT_XSK_ZEROCOPY |
NETDEV_XDP_ACT_RX_SG;
+ netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD;
}
/**
@@ -3794,6 +3808,7 @@ u16 ice_get_avail_rxq_count(struct ice_pf *pf)
static void ice_deinit_pf(struct ice_pf *pf)
{
ice_service_task_stop(pf);
+ mutex_destroy(&pf->lag_mutex);
mutex_destroy(&pf->adev_mutex);
mutex_destroy(&pf->sw_mutex);
mutex_destroy(&pf->tc_mutex);
@@ -3856,7 +3871,8 @@ static void ice_set_pf_caps(struct ice_pf *pf)
}
clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
- if (func_caps->common_cap.ieee_1588)
+ if (func_caps->common_cap.ieee_1588 &&
+ pf->hw.mac_type != ICE_MAC_E830)
set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags);
pf->max_pf_txqs = func_caps->common_cap.num_txq;
@@ -3874,6 +3890,7 @@ static int ice_init_pf(struct ice_pf *pf)
mutex_init(&pf->sw_mutex);
mutex_init(&pf->tc_mutex);
mutex_init(&pf->adev_mutex);
+ mutex_init(&pf->lag_mutex);
INIT_HLIST_HEAD(&pf->aq_wait_list);
spin_lock_init(&pf->aq_wait_lock);
@@ -4506,6 +4523,31 @@ static void ice_deinit_eth(struct ice_pf *pf)
ice_decfg_netdev(vsi);
}
+/**
+ * ice_wait_for_fw - wait for full FW readiness
+ * @hw: pointer to the hardware structure
+ * @timeout: milliseconds that can elapse before timing out
+ */
+static int ice_wait_for_fw(struct ice_hw *hw, u32 timeout)
+{
+ int fw_loading;
+ u32 elapsed = 0;
+
+ while (elapsed <= timeout) {
+ fw_loading = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_LOADING_M;
+
+ /* firmware is still loading, wait a bit longer */
+ if (fw_loading) {
+ elapsed += 100;
+ msleep(100);
+ continue;
+ }
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
static int ice_init_dev(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
@@ -4518,6 +4560,18 @@ static int ice_init_dev(struct ice_pf *pf)
return err;
}
+ /* Some cards require longer initialization times because they must
+ * load FW from an external source. This can take up to half a minute.
+ */
+ if (ice_is_pf_c827(hw)) {
+ err = ice_wait_for_fw(hw, 30000);
+ if (err) {
+ dev_err(dev, "ice_wait_for_fw timed out\n");
+ return err;
+ }
+ }
+
ice_init_feature_support(pf);
ice_request_fw(pf);
@@ -4613,6 +4667,10 @@ static void ice_init_features(struct ice_pf *pf)
if (ice_is_feature_supported(pf, ICE_F_GNSS))
ice_gnss_init(pf);
+ if (ice_is_feature_supported(pf, ICE_F_CGU) ||
+ ice_is_feature_supported(pf, ICE_F_PHY_RCLK))
+ ice_dpll_init(pf);
+
/* Note: Flow director init failure is non-fatal to load */
if (ice_init_fdir(pf))
dev_err(dev, "could not initialize flow director\n");
@@ -4631,6 +4689,9 @@ static void ice_init_features(struct ice_pf *pf)
static void ice_deinit_features(struct ice_pf *pf)
{
+ if (ice_is_safe_mode(pf))
+ return;
+
ice_deinit_lag(pf);
if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags))
ice_cfg_lldp_mib_change(&pf->hw, false);
@@ -4639,6 +4700,8 @@ static void ice_deinit_features(struct ice_pf *pf)
ice_gnss_exit(pf);
if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
ice_ptp_release(pf);
+ if (test_bit(ICE_FLAG_DPLL, pf->flags))
+ ice_dpll_deinit(pf);
}
static void ice_init_wakeup(struct ice_pf *pf)
@@ -4962,6 +5025,20 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
return -EINVAL;
}
+ /* When running under a kdump kernel, initiate a reset before enabling
+ * the device in order to clear out any pending DMA transactions. Such
+ * transactions can cause some systems to machine check when doing
+ * the pcim_enable_device() below.
+ */
+ if (is_kdump_kernel()) {
+ pci_save_state(pdev);
+ pci_clear_master(pdev);
+ err = pcie_flr(pdev);
+ if (err)
+ return err;
+ pci_restore_state(pdev);
+ }
+
/* this driver uses devres, see
* Documentation/driver-api/driver-model/devres.rst
*/
@@ -5465,7 +5542,7 @@ static void ice_pci_err_resume(struct pci_dev *pdev)
return;
}
- ice_restore_all_vfs_msi_state(pdev);
+ ice_restore_all_vfs_msi_state(pf);
ice_do_reset(pf, ICE_RESET_PFR);
ice_service_task_restart(pf);
@@ -5508,34 +5585,38 @@ static void ice_pci_err_reset_done(struct pci_dev *pdev)
* Class, Class Mask, private data (not used) }
*/
static const struct pci_device_id ice_pci_tbl[] = {
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP), 0 },
- { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_10G_BASE_T) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_SGMII) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_BACKPLANE) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_BACKPLANE) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_SFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_10G_BASE_T) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_1GBE) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E823L_QSFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E822_SI_DFLT) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_BACKPLANE) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_QSFP56) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP) },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E830_SFP_DD) },
/* required last entry */
- { 0, }
+ {}
};
MODULE_DEVICE_TABLE(pci, ice_pci_tbl);
@@ -5559,6 +5640,8 @@ static struct pci_driver ice_driver = {
#endif /* CONFIG_PM */
.shutdown = ice_shutdown,
.sriov_configure = ice_sriov_configure,
+ .sriov_get_vf_total_msix = ice_sriov_get_vf_total_msix,
+ .sriov_set_msix_vec_count = ice_sriov_set_msix_vec_count,
.err_handler = &ice_pci_err_handler
};
@@ -5570,23 +5653,37 @@ static struct pci_driver ice_driver = {
*/
static int __init ice_module_init(void)
{
- int status;
+ int status = -ENOMEM;
pr_info("%s\n", ice_driver_string);
pr_info("%s\n", ice_copyright);
+ ice_adv_lnk_speed_maps_init();
+
ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
if (!ice_wq) {
pr_err("Failed to create workqueue\n");
- return -ENOMEM;
+ return status;
+ }
+
+ ice_lag_wq = alloc_ordered_workqueue("ice_lag_wq", 0);
+ if (!ice_lag_wq) {
+ pr_err("Failed to create LAG workqueue\n");
+ goto err_dest_wq;
}
status = pci_register_driver(&ice_driver);
if (status) {
pr_err("failed to register PCI driver, err %d\n", status);
- destroy_workqueue(ice_wq);
+ goto err_dest_lag_wq;
}
+ return 0;
+
+err_dest_lag_wq:
+ destroy_workqueue(ice_lag_wq);
+err_dest_wq:
+ destroy_workqueue(ice_wq);
return status;
}
module_init(ice_module_init);
@@ -5601,6 +5698,7 @@ static void __exit ice_module_exit(void)
{
pci_unregister_driver(&ice_driver);
destroy_workqueue(ice_wq);
+ destroy_workqueue(ice_lag_wq);
pr_info("module unloaded\n");
}
module_exit(ice_module_exit);
@@ -5703,7 +5801,7 @@ static void ice_set_rx_mode(struct net_device *netdev)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- if (!vsi)
+ if (!vsi || ice_is_switchdev_running(vsi->back))
return;
/* Set the flags to synchronize filters
@@ -6255,7 +6353,7 @@ static void ice_tx_dim_work(struct work_struct *work)
u16 itr;
dim = container_of(work, struct dim, work);
- rc = (struct ice_ring_container *)dim->priv;
+ rc = dim->priv;
WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
@@ -6275,7 +6373,7 @@ static void ice_rx_dim_work(struct work_struct *work)
u16 itr;
dim = container_of(work, struct dim, work);
- rc = (struct ice_ring_container *)dim->priv;
+ rc = dim->priv;
WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
@@ -7303,10 +7401,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_vsi_rebuild;
}
- /* configure PTP timestamping after VSI rebuild */
- if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))
- ice_ptp_cfg_timestamp(pf, false);
-
err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
if (err) {
dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
@@ -7356,6 +7450,11 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
clear_bit(ICE_RESET_FAILED, pf->state);
ice_plug_aux_dev(pf);
+ if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
+ ice_lag_rebuild(pf);
+
+ /* Restore timestamp mode settings after VSI rebuild */
+ ice_ptp_restore_timestamp_mode(pf);
return;
err_vsi_rebuild:
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index 6a9364761165..f6f27361c3cf 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -287,6 +287,7 @@ struct ice_nvgre_hdr {
* M = EVLAN (0x8100) - Outer L2 header has EVLAN (ethernet type 0x8100)
* N = EVLAN (0x9100) - Outer L2 header has EVLAN (ethernet type 0x9100)
*/
+#define ICE_PKT_FROM_NETWORK BIT(3)
#define ICE_PKT_VLAN_STAG BIT(12)
#define ICE_PKT_VLAN_ITAG BIT(13)
#define ICE_PKT_VLAN_EVLAN (BIT(14) | BIT(15))
@@ -392,10 +393,10 @@ enum ice_hw_metadata_offset {
};
enum ice_pkt_flags {
- ICE_PKT_FLAGS_VLAN = 0,
- ICE_PKT_FLAGS_TUNNEL = 1,
- ICE_PKT_FLAGS_TCP = 2,
- ICE_PKT_FLAGS_ERROR = 3,
+ ICE_PKT_FLAGS_MDID20 = 0,
+ ICE_PKT_FLAGS_MDID21 = 1,
+ ICE_PKT_FLAGS_MDID22 = 2,
+ ICE_PKT_FLAGS_MDID23 = 3,
};
struct ice_hw_metadata {
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 81d96a40d5a7..71f405f8a6fe 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -39,8 +39,8 @@ ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins)
/* initialize with defaults */
for (i = 0; i < NUM_PTP_PINS_E810T; i++) {
- snprintf(ptp_pins[i].name, sizeof(ptp_pins[i].name),
- "%s", ice_pin_desc_e810t[i].name);
+ strscpy(ptp_pins[i].name, ice_pin_desc_e810t[i].name,
+ sizeof(ptp_pins[i].name));
ptp_pins[i].index = ice_pin_desc_e810t[i].index;
ptp_pins[i].func = ice_pin_desc_e810t[i].func;
ptp_pins[i].chan = ice_pin_desc_e810t[i].chan;
@@ -256,36 +256,42 @@ ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
}
/**
- * ice_set_tx_tstamp - Enable or disable Tx timestamping
- * @pf: The PF pointer to search in
- * @on: bool value for whether timestamps are enabled or disabled
+ * ice_ptp_cfg_tx_interrupt - Configure Tx timestamp interrupt for the device
+ * @pf: Board private structure
+ *
+ * Program the device to respond appropriately to the Tx timestamp interrupt
+ * cause.
*/
-static void ice_set_tx_tstamp(struct ice_pf *pf, bool on)
+static void ice_ptp_cfg_tx_interrupt(struct ice_pf *pf)
{
- struct ice_vsi *vsi;
+ struct ice_hw *hw = &pf->hw;
+ bool enable;
u32 val;
- u16 i;
-
- vsi = ice_get_main_vsi(pf);
- if (!vsi)
- return;
- /* Set the timestamp enable flag for all the Tx rings */
- ice_for_each_txq(vsi, i) {
- if (!vsi->tx_rings[i])
- continue;
- vsi->tx_rings[i]->ptp_tx = on;
+ switch (pf->ptp.tx_interrupt_mode) {
+ case ICE_PTP_TX_INTERRUPT_ALL:
+ /* React to interrupts across all quads. */
+ wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x1f);
+ enable = true;
+ break;
+ case ICE_PTP_TX_INTERRUPT_NONE:
+ /* Do not react to interrupts on any quad. */
+ wr32(hw, PFINT_TSYN_MSK + (0x4 * hw->pf_id), (u32)0x0);
+ enable = false;
+ break;
+ case ICE_PTP_TX_INTERRUPT_SELF:
+ default:
+ enable = pf->ptp.tstamp_config.tx_type == HWTSTAMP_TX_ON;
+ break;
}
/* Configure the Tx timestamp interrupt */
- val = rd32(&pf->hw, PFINT_OICR_ENA);
- if (on)
+ val = rd32(hw, PFINT_OICR_ENA);
+ if (enable)
val |= PFINT_OICR_TSYN_TX_M;
else
val &= ~PFINT_OICR_TSYN_TX_M;
- wr32(&pf->hw, PFINT_OICR_ENA, val);
-
- pf->ptp.tstamp_config.tx_type = on ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ wr32(hw, PFINT_OICR_ENA, val);
}
/**
@@ -299,7 +305,7 @@ static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
u16 i;
vsi = ice_get_main_vsi(pf);
- if (!vsi)
+ if (!vsi || !vsi->rx_rings)
return;
/* Set the timestamp flag for all the Rx rings */
@@ -308,148 +314,50 @@ static void ice_set_rx_tstamp(struct ice_pf *pf, bool on)
continue;
vsi->rx_rings[i]->ptp_rx = on;
}
-
- pf->ptp.tstamp_config.rx_filter = on ? HWTSTAMP_FILTER_ALL :
- HWTSTAMP_FILTER_NONE;
}
/**
- * ice_ptp_cfg_timestamp - Configure timestamp for init/deinit
+ * ice_ptp_disable_timestamp_mode - Disable current timestamp mode
* @pf: Board private structure
- * @ena: bool value to enable or disable time stamp
- *
- * This function will configure timestamping during PTP initialization
- * and deinitialization
- */
-void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena)
-{
- ice_set_tx_tstamp(pf, ena);
- ice_set_rx_tstamp(pf, ena);
-}
-
-/**
- * ice_get_ptp_clock_index - Get the PTP clock index
- * @pf: the PF pointer
- *
- * Determine the clock index of the PTP clock associated with this device. If
- * this is the PF controlling the clock, just use the local access to the
- * clock device pointer.
- *
- * Otherwise, read from the driver shared parameters to determine the clock
- * index value.
- *
- * Returns: the index of the PTP clock associated with this device, or -1 if
- * there is no associated clock.
- */
-int ice_get_ptp_clock_index(struct ice_pf *pf)
-{
- struct device *dev = ice_pf_to_dev(pf);
- enum ice_aqc_driver_params param_idx;
- struct ice_hw *hw = &pf->hw;
- u8 tmr_idx;
- u32 value;
- int err;
-
- /* Use the ptp_clock structure if we're the main PF */
- if (pf->ptp.clock)
- return ptp_clock_index(pf->ptp.clock);
-
- tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
- if (!tmr_idx)
- param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
- else
- param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;
-
- err = ice_aq_get_driver_param(hw, param_idx, &value, NULL);
- if (err) {
- dev_err(dev, "Failed to read PTP clock index parameter, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
- return -1;
- }
-
- /* The PTP clock index is an integer, and will be between 0 and
- * INT_MAX. The highest bit of the driver shared parameter is used to
- * indicate whether or not the currently stored clock index is valid.
- */
- if (!(value & PTP_SHARED_CLK_IDX_VALID))
- return -1;
-
- return value & ~PTP_SHARED_CLK_IDX_VALID;
-}
-
-/**
- * ice_set_ptp_clock_index - Set the PTP clock index
- * @pf: the PF pointer
- *
- * Set the PTP clock index for this device into the shared driver parameters,
- * so that other PFs associated with this device can read it.
*
- * If the PF is unable to store the clock index, it will log an error, but
- * will continue operating PTP.
+ * Called during preparation for reset to temporarily disable timestamping on
+ * the device. Called during remove to disable timestamping while cleaning up
+ * driver resources.
*/
-static void ice_set_ptp_clock_index(struct ice_pf *pf)
+static void ice_ptp_disable_timestamp_mode(struct ice_pf *pf)
{
- struct device *dev = ice_pf_to_dev(pf);
- enum ice_aqc_driver_params param_idx;
struct ice_hw *hw = &pf->hw;
- u8 tmr_idx;
- u32 value;
- int err;
-
- if (!pf->ptp.clock)
- return;
-
- tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
- if (!tmr_idx)
- param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
- else
- param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;
+ u32 val;
- value = (u32)ptp_clock_index(pf->ptp.clock);
- if (value > INT_MAX) {
- dev_err(dev, "PTP Clock index is too large to store\n");
- return;
- }
- value |= PTP_SHARED_CLK_IDX_VALID;
+ val = rd32(hw, PFINT_OICR_ENA);
+ val &= ~PFINT_OICR_TSYN_TX_M;
+ wr32(hw, PFINT_OICR_ENA, val);
- err = ice_aq_set_driver_param(hw, param_idx, value, NULL);
- if (err) {
- dev_err(dev, "Failed to set PTP clock index parameter, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
- }
+ ice_set_rx_tstamp(pf, false);
}
/**
- * ice_clear_ptp_clock_index - Clear the PTP clock index
- * @pf: the PF pointer
+ * ice_ptp_restore_timestamp_mode - Restore timestamp configuration
+ * @pf: Board private structure
*
- * Clear the PTP clock index for this device. Must be called when
- * unregistering the PTP clock, in order to ensure other PFs stop reporting
- * a clock object that no longer exists.
+ * Called at the end of rebuild to restore timestamp configuration after
+ * a device reset.
*/
-static void ice_clear_ptp_clock_index(struct ice_pf *pf)
+void ice_ptp_restore_timestamp_mode(struct ice_pf *pf)
{
- struct device *dev = ice_pf_to_dev(pf);
- enum ice_aqc_driver_params param_idx;
struct ice_hw *hw = &pf->hw;
- u8 tmr_idx;
- int err;
+ bool enable_rx;
- /* Do not clear the index if we don't own the timer */
- if (!hw->func_caps.ts_func_info.src_tmr_owned)
- return;
+ ice_ptp_cfg_tx_interrupt(pf);
- tmr_idx = hw->func_caps.ts_func_info.tmr_index_assoc;
- if (!tmr_idx)
- param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR0;
- else
- param_idx = ICE_AQC_DRIVER_PARAM_CLK_IDX_TMR1;
+ enable_rx = pf->ptp.tstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
+ ice_set_rx_tstamp(pf, enable_rx);
- err = ice_aq_set_driver_param(hw, param_idx, 0, NULL);
- if (err) {
- dev_dbg(dev, "Failed to clear PTP clock index parameter, err %d aq_err %s\n",
- err, ice_aq_str(hw->adminq.sq_last_status));
- }
+ /* Trigger an immediate software interrupt to ensure that timestamps
+ * which occurred during reset are handled now.
+ */
+ wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
+ ice_flush(hw);
}
/**
@@ -674,9 +582,6 @@ static void ice_ptp_process_tx_tstamp(struct ice_ptp_tx *tx)
int err;
u8 idx;
- if (!tx->init)
- return;
-
ptp_port = container_of(tx, struct ice_ptp_port, tx);
pf = ptp_port_to_pf(ptp_port);
hw = &pf->hw;
@@ -775,6 +680,39 @@ skip_ts_read:
}
/**
+ * ice_ptp_tx_tstamp_owner - Process Tx timestamps for all ports on the device
+ * @pf: Board private structure
+ */
+static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf)
+{
+ struct ice_ptp_port *port;
+ unsigned int i;
+
+ mutex_lock(&pf->ptp.ports_owner.lock);
+ list_for_each_entry(port, &pf->ptp.ports_owner.ports, list_member) {
+ struct ice_ptp_tx *tx = &port->tx;
+
+ if (!tx || !tx->init)
+ continue;
+
+ ice_ptp_process_tx_tstamp(tx);
+ }
+ mutex_unlock(&pf->ptp.ports_owner.lock);
+
+ for (i = 0; i < ICE_MAX_QUAD; i++) {
+ u64 tstamp_ready;
+ int err;
+
+ /* Read the Tx ready status first */
+ err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready);
+ if (err || tstamp_ready)
+ return ICE_TX_TSTAMP_WORK_PENDING;
+ }
+
+ return ICE_TX_TSTAMP_WORK_DONE;
+}
+
+/**
* ice_ptp_tx_tstamp - Process Tx timestamps for this function.
* @tx: Tx tracking structure to initialize
*
@@ -1366,6 +1304,7 @@ out_unlock:
void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
{
struct ice_ptp_port *ptp_port;
+ struct ice_hw *hw = &pf->hw;
if (!test_bit(ICE_FLAG_PTP, pf->flags))
return;
@@ -1380,11 +1319,16 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
/* Update cached link status for this port immediately */
ptp_port->link_up = linkup;
- /* E810 devices do not need to reconfigure the PHY */
- if (ice_is_e810(&pf->hw))
+ switch (hw->phy_model) {
+ case ICE_PHY_E810:
+ /* Do not reconfigure E810 PHY */
return;
-
- ice_ptp_port_phy_restart(ptp_port);
+ case ICE_PHY_E822:
+ ice_ptp_port_phy_restart(ptp_port);
+ return;
+ default:
+ dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__);
+ }
}
/**
@@ -1441,6 +1385,24 @@ static void ice_ptp_reset_phy_timestamping(struct ice_pf *pf)
}
/**
+ * ice_ptp_restart_all_phy - Restart all PHYs to recalibrate timestamping
+ * @pf: Board private structure
+ */
+static void ice_ptp_restart_all_phy(struct ice_pf *pf)
+{
+ struct list_head *entry;
+
+ list_for_each(entry, &pf->ptp.ports_owner.ports) {
+ struct ice_ptp_port *port = list_entry(entry,
+ struct ice_ptp_port,
+ list_member);
+
+ if (port->link_up)
+ ice_ptp_port_phy_restart(port);
+ }
+}
+
+/**
* ice_ptp_adjfine - Adjust clock increment rate
* @info: the driver's PTP info structure
* @scaled_ppm: Parts per million with 16-bit fractional field
@@ -1877,9 +1839,9 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
/* Reenable periodic outputs */
ice_ptp_enable_all_clkout(pf);
- /* Recalibrate and re-enable timestamp block */
- if (pf->ptp.port.link_up)
- ice_ptp_port_phy_restart(&pf->ptp.port);
+ /* Recalibrate and re-enable timestamp blocks for E822/E823 */
+ if (hw->phy_model == ICE_PHY_E822)
+ ice_ptp_restart_all_phy(pf);
exit:
if (err) {
dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err);
@@ -1976,21 +1938,32 @@ ice_ptp_get_syncdevicetime(ktime_t *device,
u32 hh_lock, hh_art_ctl;
int i;
- /* Get the HW lock */
- hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
+#define MAX_HH_HW_LOCK_TRIES 5
+#define MAX_HH_CTL_LOCK_TRIES 100
+
+ for (i = 0; i < MAX_HH_HW_LOCK_TRIES; i++) {
+ /* Get the HW lock */
+ hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
+ if (hh_lock & PFHH_SEM_BUSY_M) {
+ usleep_range(10000, 15000);
+ continue;
+ }
+ break;
+ }
if (hh_lock & PFHH_SEM_BUSY_M) {
dev_err(ice_pf_to_dev(pf), "PTP failed to get hh lock\n");
- return -EFAULT;
+ return -EBUSY;
}
+ /* Program cmd to master timer */
+ ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
+
/* Start the ART and device clock sync sequence */
hh_art_ctl = rd32(hw, GLHH_ART_CTL);
hh_art_ctl = hh_art_ctl | GLHH_ART_CTL_ACTIVE_M;
wr32(hw, GLHH_ART_CTL, hh_art_ctl);
-#define MAX_HH_LOCK_TRIES 100
-
- for (i = 0; i < MAX_HH_LOCK_TRIES; i++) {
+ for (i = 0; i < MAX_HH_CTL_LOCK_TRIES; i++) {
/* Wait for sync to complete */
hh_art_ctl = rd32(hw, GLHH_ART_CTL);
if (hh_art_ctl & GLHH_ART_CTL_ACTIVE_M) {
@@ -2014,19 +1987,23 @@ ice_ptp_get_syncdevicetime(ktime_t *device,
break;
}
}
+
+ /* Clear the master timer */
+ ice_ptp_src_cmd(hw, ICE_PTP_NOP);
+
/* Release HW lock */
hh_lock = rd32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id));
hh_lock = hh_lock & ~PFHH_SEM_BUSY_M;
wr32(hw, PFHH_SEM + (PFTSYN_SEM_BYTES * hw->pf_id), hh_lock);
- if (i == MAX_HH_LOCK_TRIES)
+ if (i == MAX_HH_CTL_LOCK_TRIES)
return -ETIMEDOUT;
return 0;
}
/**
- * ice_ptp_getcrosststamp_e822 - Capture a device cross timestamp
+ * ice_ptp_getcrosststamp_e82x - Capture a device cross timestamp
* @info: the driver's PTP info structure
* @cts: The memory to fill the cross timestamp info
*
@@ -2034,14 +2011,14 @@ ice_ptp_get_syncdevicetime(ktime_t *device,
* clock. Fill the cross timestamp information and report it back to the
* caller.
*
- * This is only valid for E822 devices which have support for generating the
- * cross timestamp via PCIe PTM.
+ * This is only valid for E822 and E823 devices which have support for
+ * generating the cross timestamp via PCIe PTM.
*
* In order to correctly correlate the ART timestamp back to the TSC time, the
* CPU must have X86_FEATURE_TSC_KNOWN_FREQ.
*/
static int
-ice_ptp_getcrosststamp_e822(struct ptp_clock_info *info,
+ice_ptp_getcrosststamp_e82x(struct ptp_clock_info *info,
struct system_device_crosststamp *cts)
{
struct ice_pf *pf = ptp_info_to_pf(info);
@@ -2081,10 +2058,10 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
{
switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
- ice_set_tx_tstamp(pf, false);
+ pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_OFF;
break;
case HWTSTAMP_TX_ON:
- ice_set_tx_tstamp(pf, true);
+ pf->ptp.tstamp_config.tx_type = HWTSTAMP_TX_ON;
break;
default:
return -ERANGE;
@@ -2092,7 +2069,7 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
- ice_set_rx_tstamp(pf, false);
+ pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
@@ -2108,12 +2085,15 @@ ice_ptp_set_timestamp_mode(struct ice_pf *pf, struct hwtstamp_config *config)
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
case HWTSTAMP_FILTER_NTP_ALL:
case HWTSTAMP_FILTER_ALL:
- ice_set_rx_tstamp(pf, true);
+ pf->ptp.tstamp_config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
return -ERANGE;
}
+ /* Immediately update the device timestamping mode */
+ ice_ptp_restore_timestamp_mode(pf);
+
return 0;
}
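With this change, pf->ptp.tstamp_config is the single source of truth for timestamping state; a sketch of a hypothetical helper (not in the patch) that a set-mode path and a post-reset path could both reuse:

static void ice_example_apply_hwtstamp(struct ice_pf *pf, bool tx_on,
				       bool rx_all)
{
	/* Store the desired mode... */
	pf->ptp.tstamp_config.tx_type =
		tx_on ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	pf->ptp.tstamp_config.rx_filter =
		rx_all ? HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;

	/* ...then program the hardware from it. The same call is used
	 * after a reset, so no separate enable/disable bookkeeping is
	 * needed.
	 */
	ice_ptp_restore_timestamp_mode(pf);
}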
@@ -2246,18 +2226,20 @@ ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
static void
ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
- info->n_per_out = N_PER_OUT_E810;
-
- if (ice_is_feature_supported(pf, ICE_F_PTP_EXTTS))
- info->n_ext_ts = N_EXT_TS_E810;
-
if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
info->n_ext_ts = N_EXT_TS_E810;
+ info->n_per_out = N_PER_OUT_E810T;
info->n_pins = NUM_PTP_PINS_E810T;
info->verify = ice_verify_pin_e810t;
/* Complete setup of the SMA pins */
ice_ptp_setup_sma_pins_e810t(pf, info);
+ } else if (ice_is_e810t(&pf->hw)) {
+ info->n_ext_ts = N_EXT_TS_NO_SMA_E810T;
+ info->n_per_out = N_PER_OUT_NO_SMA_E810T;
+ } else {
+ info->n_per_out = N_PER_OUT_E810;
+ info->n_ext_ts = N_EXT_TS_E810;
}
}
@@ -2275,22 +2257,22 @@ ice_ptp_setup_pins_e823(struct ice_pf *pf, struct ptp_clock_info *info)
}
/**
- * ice_ptp_set_funcs_e822 - Set specialized functions for E822 support
+ * ice_ptp_set_funcs_e82x - Set specialized functions for E82x support
* @pf: Board private structure
* @info: PTP info to fill
*
- * Assign functions to the PTP capabiltiies structure for E822 devices.
+ * Assign functions to the PTP capabilities structure for E82x devices.
* Functions which operate across all device families should be set directly
- * in ice_ptp_set_caps. Only add functions here which are distinct for E822
+ * in ice_ptp_set_caps. Only add functions here which are distinct for E82x
* devices.
*/
static void
-ice_ptp_set_funcs_e822(struct ice_pf *pf, struct ptp_clock_info *info)
+ice_ptp_set_funcs_e82x(struct ice_pf *pf, struct ptp_clock_info *info)
{
#ifdef CONFIG_ICE_HWTS
if (boot_cpu_has(X86_FEATURE_ART) &&
boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ))
- info->getcrosststamp = ice_ptp_getcrosststamp_e822;
+ info->getcrosststamp = ice_ptp_getcrosststamp_e82x;
#endif /* CONFIG_ICE_HWTS */
}
@@ -2324,6 +2306,8 @@ ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
static void
ice_ptp_set_funcs_e823(struct ice_pf *pf, struct ptp_clock_info *info)
{
+ ice_ptp_set_funcs_e82x(pf, info);
+
info->enable = ice_ptp_gpio_enable_e823;
ice_ptp_setup_pins_e823(pf, info);
}
@@ -2351,7 +2335,7 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
else if (ice_is_e823(&pf->hw))
ice_ptp_set_funcs_e823(pf, info);
else
- ice_ptp_set_funcs_e822(pf, info);
+ ice_ptp_set_funcs_e82x(pf, info);
}
/**
@@ -2366,7 +2350,6 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
static long ice_ptp_create_clock(struct ice_pf *pf)
{
struct ptp_clock_info *info;
- struct ptp_clock *clock;
struct device *dev;
/* No need to create a clock device if we already have one */
@@ -2379,11 +2362,11 @@ static long ice_ptp_create_clock(struct ice_pf *pf)
dev = ice_pf_to_dev(pf);
/* Attempt to register the clock before enabling the hardware. */
- clock = ptp_clock_register(info, dev);
- if (IS_ERR(clock))
- return PTR_ERR(clock);
-
- pf->ptp.clock = clock;
+ pf->ptp.clock = ptp_clock_register(info, dev);
+ if (IS_ERR(pf->ptp.clock)) {
+ dev_err(ice_pf_to_dev(pf), "Failed to register PTP clock device\n");
+ return PTR_ERR(pf->ptp.clock);
+ }
return 0;
}
@@ -2440,7 +2423,21 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
*/
enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf)
{
- return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
+ switch (pf->ptp.tx_interrupt_mode) {
+ case ICE_PTP_TX_INTERRUPT_NONE:
+ /* This device has the clock owner handle timestamps for it */
+ return ICE_TX_TSTAMP_WORK_DONE;
+ case ICE_PTP_TX_INTERRUPT_SELF:
+ /* This device handles its own timestamps */
+ return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
+ case ICE_PTP_TX_INTERRUPT_ALL:
+ /* This device handles timestamps for all ports */
+ return ice_ptp_tx_tstamp_owner(pf);
+ default:
+ WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n",
+ pf->ptp.tx_interrupt_mode);
+ return ICE_TX_TSTAMP_WORK_DONE;
+ }
}
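
The three-way dispatch above is designed to be polled from the interrupt bottom half until no work remains. A hedged sketch of such a consumer, assuming a companion ICE_PTP_TX_TSTAMP_WORK_PENDING-style value named ICE_TX_TSTAMP_WORK_PENDING alongside the ICE_TX_TSTAMP_WORK_DONE used above, and a sleepable (threaded-IRQ) context:

/* Illustrative consumer of ice_ptp_process_ts(): keep processing until
 * the tracker reports no outstanding Tx timestamps. The PENDING value
 * is assumed from context; only DONE appears in this hunk.
 */
static void example_ts_bottom_half(struct ice_pf *pf)
{
        enum ice_tx_tstamp_work work;

        do {
                work = ice_ptp_process_ts(pf);
                if (work == ICE_TX_TSTAMP_WORK_PENDING)
                        /* back off briefly before polling the PHY again */
                        usleep_range(100, 200);
        } while (work == ICE_TX_TSTAMP_WORK_PENDING);
}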
static void ice_ptp_periodic_work(struct kthread_work *work)
@@ -2474,7 +2471,7 @@ void ice_ptp_reset(struct ice_pf *pf)
if (test_bit(ICE_PFR_REQ, pf->state))
goto pfr;
- if (!hw->func_caps.ts_func_info.src_tmr_owned)
+ if (!ice_pf_src_tmr_owned(pf))
goto reset_ts;
err = ice_ptp_init_phc(hw);
@@ -2550,6 +2547,209 @@ err:
}
/**
+ * ice_ptp_aux_dev_to_aux_pf - Get auxiliary PF handle for the auxiliary device
+ * @aux_dev: auxiliary device to get the auxiliary PF for
+ */
+static struct ice_pf *
+ice_ptp_aux_dev_to_aux_pf(struct auxiliary_device *aux_dev)
+{
+ struct ice_ptp_port *aux_port;
+ struct ice_ptp *aux_ptp;
+
+ aux_port = container_of(aux_dev, struct ice_ptp_port, aux_dev);
+ aux_ptp = container_of(aux_port, struct ice_ptp, port);
+
+ return container_of(aux_ptp, struct ice_pf, ptp);
+}
+
+/**
+ * ice_ptp_aux_dev_to_owner_pf - Get PF handle for the auxiliary device
+ * @aux_dev: auxiliary device to get the PF for
+ */
+static struct ice_pf *
+ice_ptp_aux_dev_to_owner_pf(struct auxiliary_device *aux_dev)
+{
+ struct ice_ptp_port_owner *ports_owner;
+ struct auxiliary_driver *aux_drv;
+ struct ice_ptp *owner_ptp;
+
+ if (!aux_dev->dev.driver)
+ return NULL;
+
+ aux_drv = to_auxiliary_drv(aux_dev->dev.driver);
+ ports_owner = container_of(aux_drv, struct ice_ptp_port_owner,
+ aux_driver);
+ owner_ptp = container_of(ports_owner, struct ice_ptp, ports_owner);
+ return container_of(owner_ptp, struct ice_pf, ptp);
+}
+
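Both helpers above rely purely on structure layout: given a pointer to an embedded member, container_of() walks back to the enclosing object. A self-contained illustration of the same technique with hypothetical structs (compiles as plain C):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct port { int num; };
struct adapter { int id; struct port port; };

int main(void)
{
        struct adapter pf = { .id = 7 };
        struct port *p = &pf.port;
        /* Walk back from the embedded member to its enclosing object,
         * the same step ice_ptp_aux_dev_to_aux_pf() chains three times:
         * aux_dev -> ice_ptp_port -> ice_ptp -> ice_pf.
         */
        struct adapter *owner = container_of(p, struct adapter, port);

        printf("%d\n", owner->id); /* prints 7 */
        return 0;
}
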
+/**
+ * ice_ptp_auxbus_probe - Probe auxiliary devices
+ * @aux_dev: PF's auxiliary device
+ * @id: Auxiliary device ID
+ */
+static int ice_ptp_auxbus_probe(struct auxiliary_device *aux_dev,
+ const struct auxiliary_device_id *id)
+{
+ struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
+ struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev);
+
+ if (WARN_ON(!owner_pf))
+ return -ENODEV;
+
+ INIT_LIST_HEAD(&aux_pf->ptp.port.list_member);
+ mutex_lock(&owner_pf->ptp.ports_owner.lock);
+ list_add(&aux_pf->ptp.port.list_member,
+ &owner_pf->ptp.ports_owner.ports);
+ mutex_unlock(&owner_pf->ptp.ports_owner.lock);
+
+ return 0;
+}
+
+/**
+ * ice_ptp_auxbus_remove - Remove auxiliary devices from the bus
+ * @aux_dev: PF's auxiliary device
+ */
+static void ice_ptp_auxbus_remove(struct auxiliary_device *aux_dev)
+{
+ struct ice_pf *owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
+ struct ice_pf *aux_pf = ice_ptp_aux_dev_to_aux_pf(aux_dev);
+
+ mutex_lock(&owner_pf->ptp.ports_owner.lock);
+ list_del(&aux_pf->ptp.port.list_member);
+ mutex_unlock(&owner_pf->ptp.ports_owner.lock);
+}
+
+/**
+ * ice_ptp_auxbus_shutdown - Shutdown callback for the PTP auxiliary bus
+ * @aux_dev: PF's auxiliary device
+ */
+static void ice_ptp_auxbus_shutdown(struct auxiliary_device *aux_dev)
+{
+ /* Doing nothing here, but the auxbus driver requires this handler */
+}
+
+/**
+ * ice_ptp_auxbus_suspend - Suspend callback for the PTP auxiliary bus
+ * @aux_dev: PF's auxiliary device
+ * @state: power management state indicator
+ */
+static int
+ice_ptp_auxbus_suspend(struct auxiliary_device *aux_dev, pm_message_t state)
+{
+ /* Doing nothing here, but the auxbus driver requires this handler */
+ return 0;
+}
+
+/**
+ * ice_ptp_auxbus_resume - Resume callback for the PTP auxiliary bus
+ * @aux_dev: PF's auxiliary device
+ */
+static int ice_ptp_auxbus_resume(struct auxiliary_device *aux_dev)
+{
+ /* Doing nothing here, but the auxbus driver requires this handler */
+ return 0;
+}
+
+/**
+ * ice_ptp_auxbus_create_id_table - Create auxiliary device ID table
+ * @pf: Board private structure
+ * @name: auxiliary bus driver name
+ */
+static struct auxiliary_device_id *
+ice_ptp_auxbus_create_id_table(struct ice_pf *pf, const char *name)
+{
+ struct auxiliary_device_id *ids;
+
+ /* Second id left empty to terminate the array */
+ ids = devm_kcalloc(ice_pf_to_dev(pf), 2,
+ sizeof(struct auxiliary_device_id), GFP_KERNEL);
+ if (!ids)
+ return NULL;
+
+ snprintf(ids[0].name, sizeof(ids[0].name), "ice.%s", name);
+
+ return ids;
+}
+
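The generated "ice.<name>" entry is what the auxiliary bus core matches devices against: it pairs the registering module's name with the device name set in ice_ptp_create_auxbus_device() further below. A simplified sketch of that match rule (illustrative only; the real logic lives in drivers/base/auxiliary.c):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool example_auxbus_match(const char *modname, const char *devname,
                                 const char *id_name)
{
        char full[64];

        /* The bus compares "<module>.<device name>" with id_table names:
         * a device "ptp_aux_dev_0_0_clk0" from module "ice" pairs with
         * the "ice.ptp_aux_dev_0_0_clk0" id built above.
         */
        snprintf(full, sizeof(full), "%s.%s", modname, devname);

        return strcmp(full, id_name) == 0;
}
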
+/**
+ * ice_ptp_register_auxbus_driver - Register PTP auxiliary bus driver
+ * @pf: Board private structure
+ */
+static int ice_ptp_register_auxbus_driver(struct ice_pf *pf)
+{
+ struct auxiliary_driver *aux_driver;
+ struct ice_ptp *ptp;
+ struct device *dev;
+ char *name;
+ int err;
+
+ ptp = &pf->ptp;
+ dev = ice_pf_to_dev(pf);
+ aux_driver = &ptp->ports_owner.aux_driver;
+ INIT_LIST_HEAD(&ptp->ports_owner.ports);
+ mutex_init(&ptp->ports_owner.lock);
+ name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u",
+ pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn),
+ ice_get_ptp_src_clock_index(&pf->hw));
+
+ aux_driver->name = name;
+ aux_driver->shutdown = ice_ptp_auxbus_shutdown;
+ aux_driver->suspend = ice_ptp_auxbus_suspend;
+ aux_driver->remove = ice_ptp_auxbus_remove;
+ aux_driver->resume = ice_ptp_auxbus_resume;
+ aux_driver->probe = ice_ptp_auxbus_probe;
+ aux_driver->id_table = ice_ptp_auxbus_create_id_table(pf, name);
+ if (!aux_driver->id_table)
+ return -ENOMEM;
+
+ err = auxiliary_driver_register(aux_driver);
+ if (err) {
+ devm_kfree(dev, aux_driver->id_table);
+ dev_err(dev, "Failed registering aux_driver, name <%s>\n",
+ name);
+ }
+
+ return err;
+}
+
+/**
+ * ice_ptp_unregister_auxbus_driver - Unregister PTP auxiliary bus driver
+ * @pf: Board private structure
+ */
+static void ice_ptp_unregister_auxbus_driver(struct ice_pf *pf)
+{
+ struct auxiliary_driver *aux_driver = &pf->ptp.ports_owner.aux_driver;
+
+ auxiliary_driver_unregister(aux_driver);
+ devm_kfree(ice_pf_to_dev(pf), aux_driver->id_table);
+
+ mutex_destroy(&pf->ptp.ports_owner.lock);
+}
+
+/**
+ * ice_ptp_clock_index - Get the PTP clock index for this device
+ * @pf: Board private structure
+ *
+ * Returns: the PTP clock index associated with this PF, or -1 if no PTP clock
+ * is associated.
+ */
+int ice_ptp_clock_index(struct ice_pf *pf)
+{
+ struct auxiliary_device *aux_dev;
+ struct ice_pf *owner_pf;
+ struct ptp_clock *clock;
+
+ aux_dev = &pf->ptp.port.aux_dev;
+ owner_pf = ice_ptp_aux_dev_to_owner_pf(aux_dev);
+ if (!owner_pf)
+ return -1;
+ clock = owner_pf->ptp.clock;
+
+ return clock ? ptp_clock_index(clock) : -1;
+}
+
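A natural consumer of ice_ptp_clock_index() is the ethtool timestamp-info query, which exposes the PHC index to userspace. A hedged sketch of such a caller (the callback name is hypothetical; struct ethtool_ts_info and SOF_TIMESTAMPING_SOFTWARE are the standard kernel definitions):

/* Illustrative .get_ts_info-style callback built on ice_ptp_clock_index() */
static int example_get_ts_info(struct ice_pf *pf,
                               struct ethtool_ts_info *info)
{
        /* ice_ptp_clock_index() resolves the owner PF's clock through the
         * auxiliary device, so non-owner PFs report the shared PHC too.
         */
        info->phc_index = ice_ptp_clock_index(pf);
        if (info->phc_index < 0)
                /* No PHC (stub build, or owner not bound yet): advertise
                 * software timestamping only.
                 */
                info->so_timestamping = SOF_TIMESTAMPING_SOFTWARE;

        return 0;
}
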
+/**
* ice_ptp_prepare_for_reset - Prepare PTP for reset
* @pf: Board private structure
*/
@@ -2561,7 +2761,7 @@ void ice_ptp_prepare_for_reset(struct ice_pf *pf)
clear_bit(ICE_FLAG_PTP, pf->flags);
/* Disable timestamping for both Tx and Rx */
- ice_ptp_cfg_timestamp(pf, false);
+ ice_ptp_disable_timestamp_mode(pf);
kthread_cancel_delayed_work_sync(&ptp->work);
@@ -2639,11 +2839,15 @@ static int ice_ptp_init_owner(struct ice_pf *pf)
if (err)
goto err_clk;
- /* Store the PTP clock index for other PFs */
- ice_set_ptp_clock_index(pf);
+ err = ice_ptp_register_auxbus_driver(pf);
+ if (err) {
+ dev_err(ice_pf_to_dev(pf), "Failed to register PTP auxbus driver\n");
+ goto err_aux;
+ }
return 0;
-
+err_aux:
+ ptp_clock_unregister(pf->ptp.clock);
err_clk:
pf->ptp.clock = NULL;
err_exit:
@@ -2685,14 +2889,117 @@ static int ice_ptp_init_work(struct ice_pf *pf, struct ice_ptp *ptp)
*/
static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port)
{
+ struct ice_hw *hw = &pf->hw;
+
mutex_init(&ptp_port->ps_lock);
- if (ice_is_e810(&pf->hw))
+ switch (hw->phy_model) {
+ case ICE_PHY_E810:
return ice_ptp_init_tx_e810(pf, &ptp_port->tx);
+ case ICE_PHY_E822:
+ kthread_init_delayed_work(&ptp_port->ov_work,
+ ice_ptp_wait_for_offsets);
+
+ return ice_ptp_init_tx_e822(pf, &ptp_port->tx,
+ ptp_port->port_num);
+ default:
+ return -ENODEV;
+ }
+}
- kthread_init_delayed_work(&ptp_port->ov_work,
- ice_ptp_wait_for_offsets);
- return ice_ptp_init_tx_e822(pf, &ptp_port->tx, ptp_port->port_num);
+/**
+ * ice_ptp_release_auxbus_device - Release the PTP auxiliary bus device
+ * @dev: device that utilizes the auxbus
+ */
+static void ice_ptp_release_auxbus_device(struct device *dev)
+{
+ /* Doing nothing here, but the auxbus device requires this handler */
+}
+
+/**
+ * ice_ptp_create_auxbus_device - Create PTP auxiliary bus device
+ * @pf: Board private structure
+ */
+static int ice_ptp_create_auxbus_device(struct ice_pf *pf)
+{
+ struct auxiliary_device *aux_dev;
+ struct ice_ptp *ptp;
+ struct device *dev;
+ char *name;
+ int err;
+ u32 id;
+
+ ptp = &pf->ptp;
+ id = ptp->port.port_num;
+ dev = ice_pf_to_dev(pf);
+
+ aux_dev = &ptp->port.aux_dev;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "ptp_aux_dev_%u_%u_clk%u",
+ pf->pdev->bus->number, PCI_SLOT(pf->pdev->devfn),
+ ice_get_ptp_src_clock_index(&pf->hw));
+
+ aux_dev->name = name;
+ aux_dev->id = id;
+ aux_dev->dev.release = ice_ptp_release_auxbus_device;
+ aux_dev->dev.parent = dev;
+
+ err = auxiliary_device_init(aux_dev);
+ if (err)
+ goto aux_err;
+
+ err = auxiliary_device_add(aux_dev);
+ if (err) {
+ auxiliary_device_uninit(aux_dev);
+ goto aux_err;
+ }
+
+ return 0;
+aux_err:
+ dev_err(dev, "Failed to create PTP auxiliary bus device <%s>\n", name);
+ devm_kfree(dev, name);
+ return err;
+}
+
+/**
+ * ice_ptp_remove_auxbus_device - Remove PTP auxiliary bus device
+ * @pf: Board private structure
+ */
+static void ice_ptp_remove_auxbus_device(struct ice_pf *pf)
+{
+ struct auxiliary_device *aux_dev = &pf->ptp.port.aux_dev;
+
+ auxiliary_device_delete(aux_dev);
+ auxiliary_device_uninit(aux_dev);
+
+ memset(aux_dev, 0, sizeof(*aux_dev));
+}
+
+/**
+ * ice_ptp_init_tx_interrupt_mode - Initialize device Tx interrupt mode
+ * @pf: Board private structure
+ *
+ * Initialize the Tx timestamp interrupt mode for this device. For most device
+ * types, each PF processes the interrupt and manages its own timestamps. For
+ * E822-based devices, only the clock owner processes the timestamps. Other
+ * PFs disable the interrupt and do not process their own timestamps.
+ */
+static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf)
+{
+ switch (pf->hw.phy_model) {
+ case ICE_PHY_E822:
+ /* E822 based PHY has the clock owner process the interrupt
+ * for all ports.
+ */
+ if (ice_pf_src_tmr_owned(pf))
+ pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL;
+ else
+ pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_NONE;
+ break;
+ default:
+ /* other PHY types handle their own Tx interrupt */
+ pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_SELF;
+ }
}
/**
@@ -2713,10 +3020,14 @@ void ice_ptp_init(struct ice_pf *pf)
struct ice_hw *hw = &pf->hw;
int err;
+ ice_ptp_init_phy_model(hw);
+
+ ice_ptp_init_tx_interrupt_mode(pf);
+
/* If this function owns the clock hardware, it must allocate and
* configure the PTP clock device to represent it.
*/
- if (hw->func_caps.ts_func_info.src_tmr_owned) {
+ if (ice_pf_src_tmr_owned(pf)) {
err = ice_ptp_init_owner(pf);
if (err)
goto err;
@@ -2730,11 +3041,18 @@ void ice_ptp_init(struct ice_pf *pf)
/* Start the PHY timestamping block */
ice_ptp_reset_phy_timestamping(pf);
+ /* Configure initial Tx interrupt settings */
+ ice_ptp_cfg_tx_interrupt(pf);
+
set_bit(ICE_FLAG_PTP, pf->flags);
err = ice_ptp_init_work(pf, ptp);
if (err)
goto err;
+ err = ice_ptp_create_auxbus_device(pf);
+ if (err)
+ goto err;
+
dev_info(ice_pf_to_dev(pf), "PTP init successful\n");
return;
@@ -2761,7 +3079,9 @@ void ice_ptp_release(struct ice_pf *pf)
return;
/* Disable timestamping for both Tx and Rx */
- ice_ptp_cfg_timestamp(pf, false);
+ ice_ptp_disable_timestamp_mode(pf);
+
+ ice_ptp_remove_auxbus_device(pf);
ice_ptp_release_tx_tracker(pf, &pf->ptp.port.tx);
@@ -2782,9 +3102,10 @@ void ice_ptp_release(struct ice_pf *pf)
/* Disable periodic outputs */
ice_ptp_disable_all_clkout(pf);
- ice_clear_ptp_clock_index(pf);
ptp_clock_unregister(pf->ptp.clock);
pf->ptp.clock = NULL;
+ ice_ptp_unregister_auxbus_driver(pf);
+
dev_info(ice_pf_to_dev(pf), "Removed PTP clock\n");
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 995a57019ba7..06a330867fc9 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -157,7 +157,9 @@ struct ice_ptp_tx {
* ready for PTP functionality. It is used to track the port initialization
* and determine when the port's PHY offset is valid.
*
+ * @list_member: list member structure of auxiliary device
* @tx: Tx timestamp tracking for this port
+ * @aux_dev: auxiliary device associated with this port
* @ov_work: delayed work task for tracking when PHY offset is valid
* @ps_lock: mutex used to protect the overall PTP PHY start procedure
* @link_up: indicates whether the link is up
@@ -165,7 +167,9 @@ struct ice_ptp_tx {
* @port_num: the port number this structure represents
*/
struct ice_ptp_port {
+ struct list_head list_member;
struct ice_ptp_tx tx;
+ struct auxiliary_device aux_dev;
struct kthread_delayed_work ov_work;
struct mutex ps_lock; /* protects overall PTP PHY start procedure */
bool link_up;
@@ -173,11 +177,35 @@ struct ice_ptp_port {
u8 port_num;
};
+enum ice_ptp_tx_interrupt {
+ ICE_PTP_TX_INTERRUPT_NONE = 0,
+ ICE_PTP_TX_INTERRUPT_SELF,
+ ICE_PTP_TX_INTERRUPT_ALL,
+};
+
+/**
+ * struct ice_ptp_port_owner - data used to handle the PTP clock owner info
+ *
+ * This structure contains data necessary for the PTP clock owner to correctly
+ * handle the timestamping feature for all attached ports.
+ *
+ * @aux_driver: the structure carrying the auxiliary driver information
+ * @ports: list of ports handled by this port owner
+ * @lock: protect access to ports list
+ */
+struct ice_ptp_port_owner {
+ struct auxiliary_driver aux_driver;
+ struct list_head ports;
+ struct mutex lock;
+};
+
#define GLTSYN_TGT_H_IDX_MAX 4
/**
* struct ice_ptp - data used for integrating with CONFIG_PTP_1588_CLOCK
+ * @tx_interrupt_mode: the TX interrupt mode for the PTP clock
* @port: data for the PHY port initialization procedure
+ * @ports_owner: data for the auxiliary driver owner
* @work: delayed work function for periodic tasks
* @cached_phc_time: a cached copy of the PHC time for timestamp extension
* @cached_phc_jiffies: jiffies when cached_phc_time was last updated
@@ -197,7 +225,9 @@ struct ice_ptp_port {
* @late_cached_phc_updates: number of times cached PHC update is late
*/
struct ice_ptp {
+ enum ice_ptp_tx_interrupt tx_interrupt_mode;
struct ice_ptp_port port;
+ struct ice_ptp_port_owner ports_owner;
struct kthread_delayed_work work;
u64 cached_phc_time;
unsigned long cached_phc_jiffies;
@@ -258,11 +288,11 @@ struct ice_ptp {
#define ETH_GLTSYN_ENA(_i) (0x03000348 + ((_i) * 4))
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
+int ice_ptp_clock_index(struct ice_pf *pf);
struct ice_pf;
int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr);
int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr);
-void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena);
-int ice_get_ptp_clock_index(struct ice_pf *pf);
+void ice_ptp_restore_timestamp_mode(struct ice_pf *pf);
void ice_ptp_extts_event(struct ice_pf *pf);
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
@@ -287,12 +317,7 @@ static inline int ice_ptp_get_ts_config(struct ice_pf *pf, struct ifreq *ifr)
return -EOPNOTSUPP;
}
-static inline void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena) { }
-static inline int ice_get_ptp_clock_index(struct ice_pf *pf)
-{
- return -1;
-}
-
+static inline void ice_ptp_restore_timestamp_mode(struct ice_pf *pf) { }
static inline void ice_ptp_extts_event(struct ice_pf *pf) { }
static inline s8
ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
@@ -314,5 +339,10 @@ static inline void ice_ptp_release(struct ice_pf *pf) { }
static inline void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup)
{
}
+
+static inline int ice_ptp_clock_index(struct ice_pf *pf)
+{
+ return -1;
+}
#endif /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */
#endif /* _ICE_PTP_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index a38614d21ea8..a00b55e14aac 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -7,6 +7,132 @@
#include "ice_ptp_consts.h"
#include "ice_cgu_regs.h"
+static struct dpll_pin_frequency ice_cgu_pin_freq_common[] = {
+ DPLL_PIN_FREQUENCY_1PPS,
+ DPLL_PIN_FREQUENCY_10MHZ,
+};
+
+static struct dpll_pin_frequency ice_cgu_pin_freq_1_hz[] = {
+ DPLL_PIN_FREQUENCY_1PPS,
+};
+
+static struct dpll_pin_frequency ice_cgu_pin_freq_10_mhz[] = {
+ DPLL_PIN_FREQUENCY_10MHZ,
+};
+
+static const struct ice_cgu_pin_desc ice_e810t_sfp_cgu_inputs[] = {
+ { "CVL-SDP22", ZL_REF0P, DPLL_PIN_TYPE_INT_OSCILLATOR,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "CVL-SDP20", ZL_REF0N, DPLL_PIN_TYPE_INT_OSCILLATOR,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "C827_0-RCLKA", ZL_REF1P, DPLL_PIN_TYPE_MUX, 0, },
+ { "C827_0-RCLKB", ZL_REF1N, DPLL_PIN_TYPE_MUX, 0, },
+ { "SMA1", ZL_REF3P, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "SMA2/U.FL2", ZL_REF3N, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "GNSS-1PPS", ZL_REF4P, DPLL_PIN_TYPE_GNSS,
+ ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
+ { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, 0, },
+};
+
+static const struct ice_cgu_pin_desc ice_e810t_qsfp_cgu_inputs[] = {
+ { "CVL-SDP22", ZL_REF0P, DPLL_PIN_TYPE_INT_OSCILLATOR,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "CVL-SDP20", ZL_REF0N, DPLL_PIN_TYPE_INT_OSCILLATOR,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "C827_0-RCLKA", ZL_REF1P, DPLL_PIN_TYPE_MUX, },
+ { "C827_0-RCLKB", ZL_REF1N, DPLL_PIN_TYPE_MUX, },
+ { "C827_1-RCLKA", ZL_REF2P, DPLL_PIN_TYPE_MUX, },
+ { "C827_1-RCLKB", ZL_REF2N, DPLL_PIN_TYPE_MUX, },
+ { "SMA1", ZL_REF3P, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "SMA2/U.FL2", ZL_REF3N, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "GNSS-1PPS", ZL_REF4P, DPLL_PIN_TYPE_GNSS,
+ ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
+ { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, },
+};
+
+static const struct ice_cgu_pin_desc ice_e810t_sfp_cgu_outputs[] = {
+ { "REF-SMA1", ZL_OUT0, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "REF-SMA2/U.FL2", ZL_OUT1, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "PHY-CLK", ZL_OUT2, DPLL_PIN_TYPE_SYNCE_ETH_PORT, },
+ { "MAC-CLK", ZL_OUT3, DPLL_PIN_TYPE_SYNCE_ETH_PORT, },
+ { "CVL-SDP21", ZL_OUT4, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
+ { "CVL-SDP23", ZL_OUT5, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
+};
+
+static const struct ice_cgu_pin_desc ice_e810t_qsfp_cgu_outputs[] = {
+ { "REF-SMA1", ZL_OUT0, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "REF-SMA2/U.FL2", ZL_OUT1, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "PHY-CLK", ZL_OUT2, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
+ { "PHY2-CLK", ZL_OUT3, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
+ { "MAC-CLK", ZL_OUT4, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
+ { "CVL-SDP21", ZL_OUT5, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
+ { "CVL-SDP23", ZL_OUT6, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
+};
+
+static const struct ice_cgu_pin_desc ice_e823_si_cgu_inputs[] = {
+ { "NONE", SI_REF0P, 0, 0 },
+ { "NONE", SI_REF0N, 0, 0 },
+ { "SYNCE0_DP", SI_REF1P, DPLL_PIN_TYPE_MUX, 0 },
+ { "SYNCE0_DN", SI_REF1N, DPLL_PIN_TYPE_MUX, 0 },
+ { "EXT_CLK_SYNC", SI_REF2P, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "NONE", SI_REF2N, 0, 0 },
+ { "EXT_PPS_OUT", SI_REF3, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "INT_PPS_OUT", SI_REF4, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+};
+
+static const struct ice_cgu_pin_desc ice_e823_si_cgu_outputs[] = {
+ { "1588-TIME_SYNC", SI_OUT0, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "PHY-CLK", SI_OUT1, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
+ { "10MHZ-SMA2", SI_OUT2, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_10_mhz), ice_cgu_pin_freq_10_mhz },
+ { "PPS-SMA1", SI_OUT3, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+};
+
+static const struct ice_cgu_pin_desc ice_e823_zl_cgu_inputs[] = {
+ { "NONE", ZL_REF0P, 0, 0 },
+ { "INT_PPS_OUT", ZL_REF0N, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
+ { "SYNCE0_DP", ZL_REF1P, DPLL_PIN_TYPE_MUX, 0 },
+ { "SYNCE0_DN", ZL_REF1N, DPLL_PIN_TYPE_MUX, 0 },
+ { "NONE", ZL_REF2P, 0, 0 },
+ { "NONE", ZL_REF2N, 0, 0 },
+ { "EXT_CLK_SYNC", ZL_REF3P, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "NONE", ZL_REF3N, 0, 0 },
+ { "EXT_PPS_OUT", ZL_REF4P, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
+ { "OCXO", ZL_REF4N, DPLL_PIN_TYPE_INT_OSCILLATOR, 0 },
+};
+
+static const struct ice_cgu_pin_desc ice_e823_zl_cgu_outputs[] = {
+ { "PPS-SMA1", ZL_OUT0, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_1_hz), ice_cgu_pin_freq_1_hz },
+ { "10MHZ-SMA2", ZL_OUT1, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_10_mhz), ice_cgu_pin_freq_10_mhz },
+ { "PHY-CLK", ZL_OUT2, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
+ { "1588-TIME_REF", ZL_OUT3, DPLL_PIN_TYPE_SYNCE_ETH_PORT, 0 },
+ { "CPK-TIME_SYNC", ZL_OUT4, DPLL_PIN_TYPE_EXT,
+ ARRAY_SIZE(ice_cgu_pin_freq_common), ice_cgu_pin_freq_common },
+ { "NONE", ZL_OUT5, 0, 0 },
+};
+
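Each entry above pairs a board pin label with its hardware index, DPLL pin type, and the list of frequencies it supports. The descriptor shape implied by these initializers, as a sketch (the authoritative definition lives in the driver headers; field names are inferred from the t[pin].type and t[pin].freq_supp_num accesses further below):

struct example_cgu_pin_desc {
        char *name;                           /* board label, e.g. "SMA1" */
        u8 index;                             /* hardware pin, e.g. ZL_REF3P */
        enum dpll_pin_type type;              /* DPLL_PIN_TYPE_* */
        u8 freq_supp_num;                     /* entries in freq_supp */
        struct dpll_pin_frequency *freq_supp; /* supported frequencies */
};
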
/* Low level functions for interacting with and managing the device clock used
* for the Precision Time Protocol.
*
@@ -107,7 +233,7 @@ static u64 ice_ptp_read_src_incval(struct ice_hw *hw)
*
* Prepare the source timer for an upcoming timer sync command.
*/
-static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
+void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
{
u32 cmd_val;
u8 tmr_idx;
@@ -116,21 +242,23 @@ static void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
cmd_val = tmr_idx << SEL_CPK_SRC;
switch (cmd) {
- case INIT_TIME:
+ case ICE_PTP_INIT_TIME:
cmd_val |= GLTSYN_CMD_INIT_TIME;
break;
- case INIT_INCVAL:
+ case ICE_PTP_INIT_INCVAL:
cmd_val |= GLTSYN_CMD_INIT_INCVAL;
break;
- case ADJ_TIME:
+ case ICE_PTP_ADJ_TIME:
cmd_val |= GLTSYN_CMD_ADJ_TIME;
break;
- case ADJ_TIME_AT_TIME:
+ case ICE_PTP_ADJ_TIME_AT_TIME:
cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME;
break;
- case READ_TIME:
+ case ICE_PTP_READ_TIME:
cmd_val |= GLTSYN_CMD_READ_TIME;
break;
+ case ICE_PTP_NOP:
+ break;
}
wr32(hw, GLTSYN_CMD, cmd_val);
@@ -166,9 +294,9 @@ ice_fill_phy_msg_e822(struct ice_sbq_msg_input *msg, u8 port, u16 offset)
{
int phy_port, phy, quadtype;
- phy_port = port % ICE_PORTS_PER_PHY;
- phy = port / ICE_PORTS_PER_PHY;
- quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_NUM_QUAD_TYPE;
+ phy_port = port % ICE_PORTS_PER_PHY_E822;
+ phy = port / ICE_PORTS_PER_PHY_E822;
+ quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E822;
if (quadtype == 0) {
msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port);
@@ -293,7 +421,7 @@ static bool ice_is_40b_phy_reg_e822(u16 low_addr, u16 *high_addr)
*
* Read a PHY register for the given port over the device sideband queue.
*/
-int
+static int
ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val)
{
struct ice_sbq_msg_input msg = {0};
@@ -370,7 +498,7 @@ ice_read_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 *val)
*
* Write a PHY register for the given port over the device sideband queue.
*/
-int
+static int
ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val)
{
struct ice_sbq_msg_input msg = {0};
@@ -493,20 +621,25 @@ ice_write_64b_phy_reg_e822(struct ice_hw *hw, u8 port, u16 low_addr, u64 val)
* Fill a message buffer for accessing a register in a quad shared between
* multiple PHYs.
*/
-static void
+static int
ice_fill_quad_msg_e822(struct ice_sbq_msg_input *msg, u8 quad, u16 offset)
{
u32 addr;
+ if (quad >= ICE_MAX_QUAD)
+ return -EINVAL;
+
msg->dest_dev = rmn_0;
- if ((quad % ICE_NUM_QUAD_TYPE) == 0)
+ if ((quad % ICE_QUADS_PER_PHY_E822) == 0)
addr = Q_0_BASE + offset;
else
addr = Q_1_BASE + offset;
msg->msg_addr_low = lower_16_bits(addr);
msg->msg_addr_high = upper_16_bits(addr);
+
+ return 0;
}
/**
@@ -525,10 +658,10 @@ ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val)
struct ice_sbq_msg_input msg = {0};
int err;
- if (quad >= ICE_MAX_QUAD)
- return -EINVAL;
+ err = ice_fill_quad_msg_e822(&msg, quad, offset);
+ if (err)
+ return err;
- ice_fill_quad_msg_e822(&msg, quad, offset);
msg.opcode = ice_sbq_msg_rd;
err = ice_sbq_rw_reg(hw, &msg);
@@ -559,10 +692,10 @@ ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val)
struct ice_sbq_msg_input msg = {0};
int err;
- if (quad >= ICE_MAX_QUAD)
- return -EINVAL;
+ err = ice_fill_quad_msg_e822(&msg, quad, offset);
+ if (err)
+ return err;
- ice_fill_quad_msg_e822(&msg, quad, offset);
msg.opcode = ice_sbq_msg_wr;
msg.data = val;
@@ -626,29 +759,32 @@ ice_read_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp)
* @quad: the quad to read from
* @idx: the timestamp index to reset
*
- * Clear a timestamp, resetting its valid bit, from the PHY quad block that is
- * shared between the internal PHYs on the E822 devices.
+ * Read the timestamp out of the quad to clear its timestamp status bit from
+ * the PHY quad block that is shared between the internal PHYs of the E822
+ * devices.
+ *
+ * Note that unlike E810, software cannot directly write to the quad memory
+ * bank registers. E822 relies on the ice_get_phy_tx_tstamp_ready() function
+ * to determine which timestamps are valid. Reading a timestamp auto-clears
+ * the valid bit.
+ *
+ * To directly clear the contents of the timestamp block entirely, discarding
+ * all timestamp data at once, software should instead use
+ * ice_ptp_reset_ts_memory_quad_e822().
+ *
+ * This function should only be called on an idx whose bit is set according to
+ * ice_get_phy_tx_tstamp_ready().
*/
static int
ice_clear_phy_tstamp_e822(struct ice_hw *hw, u8 quad, u8 idx)
{
- u16 lo_addr, hi_addr;
+ u64 unused_tstamp;
int err;
- lo_addr = (u16)TS_L(Q_REG_TX_MEMORY_BANK_START, idx);
- hi_addr = (u16)TS_H(Q_REG_TX_MEMORY_BANK_START, idx);
-
- err = ice_write_quad_reg_e822(hw, quad, lo_addr, 0);
+ err = ice_read_phy_tstamp_e822(hw, quad, idx, &unused_tstamp);
if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n",
- err);
- return err;
- }
-
- err = ice_write_quad_reg_e822(hw, quad, hi_addr, 0);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n",
- err);
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read the timestamp register for quad %u, idx %u, err %d\n",
+ quad, idx, err);
return err;
}
@@ -1023,7 +1159,7 @@ static int ice_ptp_init_phc_e822(struct ice_hw *hw)
* @time: Time to initialize the PHY port clocks to
*
* Program the PHY port registers with a new initial time value. The port
- * clock will be initialized once the driver issues an INIT_TIME sync
+ * clock will be initialized once the driver issues an ICE_PTP_INIT_TIME sync
* command. The time value is the upper 32 bits of the PHY timer, usually in
* units of nominal nanoseconds.
*/
@@ -1072,14 +1208,14 @@ exit_err:
*
* Program the port for an atomic adjustment by writing the Tx and Rx timer
* registers. The atomic adjustment won't be completed until the driver issues
- * an ADJ_TIME command.
+ * an ICE_PTP_ADJ_TIME command.
*
* Note that time is not in units of nanoseconds. It is in clock time
* including the lower sub-nanosecond portion of the port timer.
*
* Negative adjustments are supported using 2s complement arithmetic.
*/
-int
+static int
ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time)
{
u32 l_time, u_time;
@@ -1125,7 +1261,7 @@ exit_err:
*
* Prepare the PHY ports for an atomic time adjustment by programming the PHY
* Tx and Rx port registers. The actual adjustment is completed by issuing an
- * ADJ_TIME or ADJ_TIME_AT_TIME sync command.
+ * ICE_PTP_ADJ_TIME or ICE_PTP_ADJ_TIME_AT_TIME sync command.
*/
static int
ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
@@ -1160,7 +1296,7 @@ ice_ptp_prep_phy_adj_e822(struct ice_hw *hw, s32 adj)
*
* Prepare each of the PHY ports for a new increment value by programming the
* port's TIMETUS registers. The new increment value will be updated after
- * issuing an INIT_INCVAL command.
+ * issuing an ICE_PTP_INIT_INCVAL command.
*/
static int
ice_ptp_prep_phy_incval_e822(struct ice_hw *hw, u64 incval)
@@ -1226,18 +1362,18 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts)
}
/**
- * ice_ptp_one_port_cmd - Prepare a single PHY port for a timer command
+ * ice_ptp_write_port_cmd_e822 - Prepare a single PHY port for a timer command
* @hw: pointer to HW struct
* @port: Port to which cmd has to be sent
* @cmd: Command to be sent to the port
*
* Prepare the requested port for an upcoming timer sync command.
*
- * Note there is no equivalent of this operation on E810, as that device
- * always handles all external PHYs internally.
+ * Do not use this function directly. If you want to configure exactly one
+ * port, use ice_ptp_one_port_cmd() instead.
*/
static int
-ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
+ice_ptp_write_port_cmd_e822(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
{
u32 cmd_val, val;
u8 tmr_idx;
@@ -1246,21 +1382,23 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
tmr_idx = ice_get_ptp_src_clock_index(hw);
cmd_val = tmr_idx << SEL_PHY_SRC;
switch (cmd) {
- case INIT_TIME:
+ case ICE_PTP_INIT_TIME:
cmd_val |= PHY_CMD_INIT_TIME;
break;
- case INIT_INCVAL:
+ case ICE_PTP_INIT_INCVAL:
cmd_val |= PHY_CMD_INIT_INCVAL;
break;
- case ADJ_TIME:
+ case ICE_PTP_ADJ_TIME:
cmd_val |= PHY_CMD_ADJ_TIME;
break;
- case READ_TIME:
+ case ICE_PTP_READ_TIME:
cmd_val |= PHY_CMD_READ_TIME;
break;
- case ADJ_TIME_AT_TIME:
+ case ICE_PTP_ADJ_TIME_AT_TIME:
cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME;
break;
+ case ICE_PTP_NOP:
+ break;
}
/* Tx case */
@@ -1307,6 +1445,39 @@ ice_ptp_one_port_cmd(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd)
}
/**
+ * ice_ptp_one_port_cmd - Prepare one port for a timer command
+ * @hw: pointer to the HW struct
+ * @configured_port: the port to configure with configured_cmd
+ * @configured_cmd: timer command to prepare on the configured_port
+ *
+ * Prepare the configured_port for the configured_cmd, and prepare all other
+ * ports for ICE_PTP_NOP. This causes the configured_port to execute the
+ * desired command while all other ports perform no operation.
+ */
+static int
+ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port,
+ enum ice_ptp_tmr_cmd configured_cmd)
+{
+ u8 port;
+
+ for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
+ enum ice_ptp_tmr_cmd cmd;
+ int err;
+
+ if (port == configured_port)
+ cmd = configured_cmd;
+ else
+ cmd = ICE_PTP_NOP;
+
+ err = ice_ptp_write_port_cmd_e822(hw, port, cmd);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
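ice_ptp_one_port_cmd() only arms the ports; nothing happens until a SYNC is issued, and the source timer must be explicitly armed with ICE_PTP_NOP so it ignores that SYNC. Condensed from the ice_sync_phy_timer_e822() hunk below, as a sketch with locking and error paths trimmed:

static int example_adjust_one_port(struct ice_hw *hw, u8 port)
{
        int err;

        /* Arm only this port; every other port is armed with ICE_PTP_NOP */
        err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_ADJ_TIME);
        if (err)
                return err;

        /* Do not perform any action on the main timer */
        ice_ptp_src_cmd(hw, ICE_PTP_NOP);

        /* Fire the SYNC; only the armed port applies its adjustment */
        ice_ptp_exec_tmr_cmd(hw);

        return 0;
}
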
+/**
* ice_ptp_port_cmd_e822 - Prepare all ports for a timer command
* @hw: pointer to the HW struct
* @cmd: timer command to prepare
@@ -1322,7 +1493,7 @@ ice_ptp_port_cmd_e822(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) {
int err;
- err = ice_ptp_one_port_cmd(hw, port, cmd);
+ err = ice_ptp_write_port_cmd_e822(hw, port, cmd);
if (err)
return err;
}
@@ -2159,8 +2330,8 @@ int ice_phy_cfg_rx_offset_e822(struct ice_hw *hw, u8 port)
* @phy_time: on return, the 64bit PHY timer value
* @phc_time: on return, the lower 64bits of PHC time
*
- * Issue a READ_TIME timer command to simultaneously capture the PHY and PHC
- * timer values.
+ * Issue an ICE_PTP_READ_TIME timer command to simultaneously capture the PHY
+ * and PHC timer values.
*/
static int
ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
@@ -2173,15 +2344,15 @@ ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
tmr_idx = ice_get_ptp_src_clock_index(hw);
- /* Prepare the PHC timer for a READ_TIME capture command */
- ice_ptp_src_cmd(hw, READ_TIME);
+ /* Prepare the PHC timer for an ICE_PTP_READ_TIME capture command */
+ ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME);
- /* Prepare the PHY timer for a READ_TIME capture command */
- err = ice_ptp_one_port_cmd(hw, port, READ_TIME);
+ /* Prepare the PHY timer for an ICE_PTP_READ_TIME capture command */
+ err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_READ_TIME);
if (err)
return err;
- /* Issue the sync to start the READ_TIME capture */
+ /* Issue the sync to start the ICE_PTP_READ_TIME capture */
ice_ptp_exec_tmr_cmd(hw);
/* Read the captured PHC time from the shadow time registers */
@@ -2215,10 +2386,11 @@ ice_read_phy_and_phc_time_e822(struct ice_hw *hw, u8 port, u64 *phy_time,
* @port: the PHY port to synchronize
*
* Perform an adjustment to ensure that the PHY and PHC timers are in sync.
- * This is done by issuing a READ_TIME command which triggers a simultaneous
- * read of the PHY timer and PHC timer. Then we use the difference to
- * calculate an appropriate 2s complement addition to add to the PHY timer in
- * order to ensure it reads the same value as the primary PHC timer.
+ * This is done by issuing an ICE_PTP_READ_TIME command which triggers a
+ * simultaneous read of the PHY timer and PHC timer. Then we use the
+ * difference to calculate an appropriate 2s complement addition to add
+ * to the PHY timer in order to ensure it reads the same value as the
+ * primary PHC timer.
*/
static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
{
@@ -2248,10 +2420,13 @@ static int ice_sync_phy_timer_e822(struct ice_hw *hw, u8 port)
if (err)
goto err_unlock;
- err = ice_ptp_one_port_cmd(hw, port, ADJ_TIME);
+ err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_ADJ_TIME);
if (err)
goto err_unlock;
+ /* Do not perform any action on the main timer */
+ ice_ptp_src_cmd(hw, ICE_PTP_NOP);
+
/* Issue the sync to activate the time adjustment */
ice_ptp_exec_tmr_cmd(hw);
@@ -2368,10 +2543,13 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
if (err)
return err;
- err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL);
+ err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_INIT_INCVAL);
if (err)
return err;
+ /* Do not perform any action on the main timer */
+ ice_ptp_src_cmd(hw, ICE_PTP_NOP);
+
ice_ptp_exec_tmr_cmd(hw);
err = ice_read_phy_reg_e822(hw, port, P_REG_PS, &val);
@@ -2393,7 +2571,7 @@ int ice_start_phy_timer_e822(struct ice_hw *hw, u8 port)
if (err)
return err;
- err = ice_ptp_one_port_cmd(hw, port, INIT_INCVAL);
+ err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_INIT_INCVAL);
if (err)
return err;
@@ -2642,28 +2820,39 @@ ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
* @lport: the lport to read from
* @idx: the timestamp index to reset
*
- * Clear a timestamp, resetting its valid bit, from the timestamp block of the
- * external PHY on the E810 device.
+ * Read the timestamp and then forcibly overwrite its value to clear the valid
+ * bit from the timestamp block of the external PHY on the E810 device.
+ *
+ * This function should only be called on an idx whose bit is set according to
+ * ice_get_phy_tx_tstamp_ready().
*/
static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx)
{
u32 lo_addr, hi_addr;
+ u64 unused_tstamp;
int err;
+ err = ice_read_phy_tstamp_e810(hw, lport, idx, &unused_tstamp);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read the timestamp register for lport %u, idx %u, err %d\n",
+ lport, idx, err);
+ return err;
+ }
+
lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
err = ice_write_phy_reg_e810(hw, lo_addr, 0);
if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register, err %d\n",
- err);
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register for lport %u, idx %u, err %d\n",
+ lport, idx, err);
return err;
}
err = ice_write_phy_reg_e810(hw, hi_addr, 0);
if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register, err %d\n",
- err);
+ ice_debug(hw, ICE_DBG_PTP, "Failed to clear high PTP timestamp register for lport %u, idx %u, err %d\n",
+ lport, idx, err);
return err;
}
@@ -2714,7 +2903,7 @@ static int ice_ptp_init_phc_e810(struct ice_hw *hw)
*
* Program the PHY port ETH_GLTSYN_SHTIME registers in preparation setting the
* initial clock time. The time will not actually be programmed until the
- * driver issues an INIT_TIME command.
+ * driver issues an ICE_PTP_INIT_TIME command.
*
* The time value is the upper 32 bits of the PHY timer, usually in units of
* nominal nanoseconds.
@@ -2749,7 +2938,7 @@ static int ice_ptp_prep_phy_time_e810(struct ice_hw *hw, u32 time)
*
* Prepare the PHY port for an atomic adjustment by programming the PHY
* ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual adjustment
- * is completed by issuing an ADJ_TIME sync command.
+ * is completed by issuing an ICE_PTP_ADJ_TIME sync command.
*
* The adjustment value only contains the portion used for the upper 32bits of
* the PHY timer, usually in units of nominal nanoseconds. Negative
@@ -2789,7 +2978,7 @@ static int ice_ptp_prep_phy_adj_e810(struct ice_hw *hw, s32 adj)
*
* Prepare the PHY port for a new increment value by programming the PHY
* ETH_GLTSYN_SHADJ_L and ETH_GLTSYN_SHADJ_H registers. The actual change is
- * completed by issuing an INIT_INCVAL command.
+ * completed by issuing an ICE_PTP_INIT_INCVAL command.
*/
static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval)
{
@@ -2832,21 +3021,23 @@ static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
int err;
switch (cmd) {
- case INIT_TIME:
+ case ICE_PTP_INIT_TIME:
cmd_val = GLTSYN_CMD_INIT_TIME;
break;
- case INIT_INCVAL:
+ case ICE_PTP_INIT_INCVAL:
cmd_val = GLTSYN_CMD_INIT_INCVAL;
break;
- case ADJ_TIME:
+ case ICE_PTP_ADJ_TIME:
cmd_val = GLTSYN_CMD_ADJ_TIME;
break;
- case READ_TIME:
+ case ICE_PTP_READ_TIME:
cmd_val = GLTSYN_CMD_READ_TIME;
break;
- case ADJ_TIME_AT_TIME:
+ case ICE_PTP_ADJ_TIME_AT_TIME:
cmd_val = GLTSYN_CMD_ADJ_INIT_TIME;
break;
+ case ICE_PTP_NOP:
+ return 0;
}
/* Read, modify, write */
@@ -2869,6 +3060,185 @@ static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
return 0;
}
+/**
+ * ice_get_phy_tx_tstamp_ready_e810 - Read Tx memory status register
+ * @hw: pointer to the HW struct
+ * @port: the PHY port to read
+ * @tstamp_ready: contents of the Tx memory status register
+ *
+ * E810 devices do not use a Tx memory status register. Instead simply
+ * indicate that all timestamps are currently ready.
+ */
+static int
+ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready)
+{
+ *tstamp_ready = 0xFFFFFFFFFFFFFFFF;
+ return 0;
+}
+
+/* E810T SMA functions
+ *
+ * The following functions operate specifically on E810T hardware and are used
+ * to access the extended GPIOs available.
+ */
+
+/**
+ * ice_get_pca9575_handle - Find the PCA9575 GPIO controller's netlist handle
+ * @hw: pointer to the hw struct
+ * @pca9575_handle: GPIO controller's handle
+ *
+ * Find and return the GPIO controller's handle in the netlist.
+ * Once found, the value is cached in the HW structure and subsequent calls
+ * return the cached value.
+ */
+static int
+ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
+{
+ struct ice_aqc_get_link_topo *cmd;
+ struct ice_aq_desc desc;
+ int status;
+ u8 idx;
+
+ /* If handle was read previously return cached value */
+ if (hw->io_expander_handle) {
+ *pca9575_handle = hw->io_expander_handle;
+ return 0;
+ }
+
+ /* If handle was not detected read it from the netlist */
+ cmd = &desc.params.get_link_topo;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+
+ /* Set node type to GPIO controller */
+ cmd->addr.topo_params.node_type_ctx =
+ (ICE_AQC_LINK_TOPO_NODE_TYPE_M &
+ ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);
+
+#define SW_PCA9575_SFP_TOPO_IDX 2
+#define SW_PCA9575_QSFP_TOPO_IDX 1
+
+ /* Check if the SW IO expander controlling SMA exists in the netlist. */
+ if (hw->device_id == ICE_DEV_ID_E810C_SFP)
+ idx = SW_PCA9575_SFP_TOPO_IDX;
+ else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
+ idx = SW_PCA9575_QSFP_TOPO_IDX;
+ else
+ return -EOPNOTSUPP;
+
+ cmd->addr.topo_params.index = idx;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (status)
+ return -EOPNOTSUPP;
+
+ /* Verify if we found the right IO expander type */
+ if (desc.params.get_link_topo.node_part_num !=
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
+ return -EOPNOTSUPP;
+
+ /* If present save the handle and return it */
+ hw->io_expander_handle =
+ le16_to_cpu(desc.params.get_link_topo.addr.handle);
+ *pca9575_handle = hw->io_expander_handle;
+
+ return 0;
+}
+
+/**
+ * ice_read_sma_ctrl_e810t - Read the SMA controller state
+ * @hw: pointer to the hw struct
+ * @data: pointer to data to be read from the GPIO controller
+ *
+ * Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the
+ * PCA9575 expander, so only bits 3-7 in data are valid.
+ */
+int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data)
+{
+ int status;
+ u16 handle;
+ u8 i;
+
+ status = ice_get_pca9575_handle(hw, &handle);
+ if (status)
+ return status;
+
+ *data = 0;
+
+ for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
+ bool pin;
+
+ status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
+ &pin, NULL);
+ if (status)
+ break;
+ *data |= (u8)(!pin) << i;
+ }
+
+ return status;
+}
+
+/**
+ * ice_write_sma_ctrl_e810t - Write data to the SMA controller
+ * @hw: pointer to the hw struct
+ * @data: data to be written to the GPIO controller
+ *
+ * Write the data to the SMA controller. It is connected to pins 3-7 of Port 1
+ * of the PCA9575 expander, so only bits 3-7 in data are valid.
+ */
+int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
+{
+ int status;
+ u16 handle;
+ u8 i;
+
+ status = ice_get_pca9575_handle(hw, &handle);
+ if (status)
+ return status;
+
+ for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
+ bool pin;
+
+ pin = !(data & (1 << i));
+ status = ice_aq_set_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
+ pin, NULL);
+ if (status)
+ break;
+ }
+
+ return status;
+}
+
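Together the two helpers give a read-modify-write window onto the active-low SMA bits 3-7. A hedged sketch of toggling a single pin through them (EXAMPLE_SMA1_BIT is hypothetical; the driver defines its real ICE_SMA*_E810T masks elsewhere):

/* Hedged sketch: toggle one SMA control bit with a read-modify-write.
 * EXAMPLE_SMA1_BIT is an assumed bit inside the valid 3..7 window.
 */
#define EXAMPLE_SMA1_BIT BIT(3)

static int example_toggle_sma1(struct ice_hw *hw, bool enable)
{
        u8 data;
        int err;

        err = ice_read_sma_ctrl_e810t(hw, &data);
        if (err)
                return err;

        if (enable)
                data |= EXAMPLE_SMA1_BIT;
        else
                data &= ~EXAMPLE_SMA1_BIT;

        return ice_write_sma_ctrl_e810t(hw, data);
}
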
+/**
+ * ice_read_pca9575_reg_e810t - Read a register of the PCA9575 GPIO controller
+ * @hw: pointer to the hw struct
+ * @offset: GPIO controller register offset
+ * @data: pointer to data to be read from the GPIO controller
+ *
+ * Read the register from the GPIO controller
+ */
+int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data)
+{
+ struct ice_aqc_link_topo_addr link_topo;
+ __le16 addr;
+ u16 handle;
+ int err;
+
+ memset(&link_topo, 0, sizeof(link_topo));
+
+ err = ice_get_pca9575_handle(hw, &handle);
+ if (err)
+ return err;
+
+ link_topo.handle = cpu_to_le16(handle);
+ link_topo.topo_params.node_type_ctx =
+ FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
+ ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
+
+ addr = cpu_to_le16((u16)offset);
+
+ return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
+}
+
/* Device agnostic functions
*
* The following functions implement shared behavior common to both E822 and
@@ -2926,6 +3296,21 @@ void ice_ptp_unlock(struct ice_hw *hw)
}
/**
+ * ice_ptp_init_phy_model - Initialize hw->phy_model based on device type
+ * @hw: pointer to the HW structure
+ *
+ * Determine the PHY model for the device, and initialize hw->phy_model
+ * for use by other functions.
+ */
+void ice_ptp_init_phy_model(struct ice_hw *hw)
+{
+ if (ice_is_e810(hw))
+ hw->phy_model = ICE_PHY_E810;
+ else
+ hw->phy_model = ICE_PHY_E822;
+}
+
+/**
* ice_ptp_tmr_cmd - Prepare and trigger a timer sync command
* @hw: pointer to HW struct
* @cmd: the command to issue
@@ -2943,10 +3328,17 @@ static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd)
ice_ptp_src_cmd(hw, cmd);
/* Next, prepare the ports */
- if (ice_is_e810(hw))
+ switch (hw->phy_model) {
+ case ICE_PHY_E810:
err = ice_ptp_port_cmd_e810(hw, cmd);
- else
+ break;
+ case ICE_PHY_E822:
err = ice_ptp_port_cmd_e822(hw, cmd);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
if (err) {
ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, err %d\n",
cmd, err);
@@ -2988,14 +3380,21 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
/* PHY timers */
/* Fill Rx and Tx ports and send msg to PHY */
- if (ice_is_e810(hw))
+ switch (hw->phy_model) {
+ case ICE_PHY_E810:
err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF);
- else
+ break;
+ case ICE_PHY_E822:
err = ice_ptp_prep_phy_time_e822(hw, time & 0xFFFFFFFF);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
if (err)
return err;
- return ice_ptp_tmr_cmd(hw, INIT_TIME);
+ return ice_ptp_tmr_cmd(hw, ICE_PTP_INIT_TIME);
}
/**
@@ -3008,8 +3407,8 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time)
*
* 1) Write the increment value to the source timer shadow registers
* 2) Write the increment value to the PHY timer shadow registers
- * 3) Issue an INIT_INCVAL timer command to synchronously switch both the
- * source and port timers to the new increment value at the next clock
+ * 3) Issue an ICE_PTP_INIT_INCVAL timer command to synchronously switch both
+ * the source and port timers to the new increment value at the next clock
* cycle.
*/
int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
@@ -3023,14 +3422,21 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval)
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval));
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval));
- if (ice_is_e810(hw))
+ switch (hw->phy_model) {
+ case ICE_PHY_E810:
err = ice_ptp_prep_phy_incval_e810(hw, incval);
- else
+ break;
+ case ICE_PHY_E822:
err = ice_ptp_prep_phy_incval_e822(hw, incval);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
if (err)
return err;
- return ice_ptp_tmr_cmd(hw, INIT_INCVAL);
+ return ice_ptp_tmr_cmd(hw, ICE_PTP_INIT_INCVAL);
}
/**
@@ -3064,8 +3470,8 @@ int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval)
*
* 1) Write the adjustment to the source timer shadow registers
* 2) Write the adjustment to the PHY timer shadow registers
- * 3) Issue an ADJ_TIME timer command to synchronously apply the adjustment to
- * both the source and port timers at the next clock cycle.
+ * 3) Issue an ICE_PTP_ADJ_TIME timer command to synchronously apply the
+ * adjustment to both the source and port timers at the next clock cycle.
*/
int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
{
@@ -3075,21 +3481,28 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;
/* Write the desired clock adjustment into the GLTSYN_SHADJ register.
- * For an ADJ_TIME command, this set of registers represents the value
- * to add to the clock time. It supports subtraction by interpreting
- * the value as a 2's complement integer.
+ * For an ICE_PTP_ADJ_TIME command, this set of registers represents
+ * the value to add to the clock time. It supports subtraction by
+ * interpreting the value as a 2's complement integer.
*/
wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0);
wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj);
- if (ice_is_e810(hw))
+ switch (hw->phy_model) {
+ case ICE_PHY_E810:
err = ice_ptp_prep_phy_adj_e810(hw, adj);
- else
+ break;
+ case ICE_PHY_E822:
err = ice_ptp_prep_phy_adj_e822(hw, adj);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ }
+
if (err)
return err;
- return ice_ptp_tmr_cmd(hw, ADJ_TIME);
+ return ice_ptp_tmr_cmd(hw, ICE_PTP_ADJ_TIME);
}
/**
@@ -3105,10 +3518,14 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj)
*/
int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
{
- if (ice_is_e810(hw))
+ switch (hw->phy_model) {
+ case ICE_PHY_E810:
return ice_read_phy_tstamp_e810(hw, block, idx, tstamp);
- else
+ case ICE_PHY_E822:
return ice_read_phy_tstamp_e822(hw, block, idx, tstamp);
+ default:
+ return -EOPNOTSUPP;
+ }
}
/**
@@ -3117,267 +3534,484 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp)
* @block: the block to read from
* @idx: the timestamp index to reset
*
- * Clear a timestamp, resetting its valid bit, from the timestamp block. For
- * E822 devices, the block is the quad to clear from. For E810 devices, the
- * block is the logical port to clear from.
+ * Clear a timestamp from the timestamp block, discarding its value without
+ * returning it. This resets the memory status bit for the timestamp index
+ * allowing it to be reused for another timestamp in the future.
+ *
+ * For E822 devices, the block number is the PHY quad to clear from. For E810
+ * devices, the block number is the logical port to clear from.
+ *
+ * This function must only be called on a timestamp index whose valid bit is
+ * set according to ice_get_phy_tx_tstamp_ready().
*/
int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
{
- if (ice_is_e810(hw))
+ switch (hw->phy_model) {
+ case ICE_PHY_E810:
return ice_clear_phy_tstamp_e810(hw, block, idx);
- else
+ case ICE_PHY_E822:
return ice_clear_phy_tstamp_e822(hw, block, idx);
+ default:
+ return -EOPNOTSUPP;
+ }
}
/**
- * ice_get_phy_tx_tstamp_ready_e810 - Read Tx memory status register
- * @hw: pointer to the HW struct
- * @port: the PHY port to read
- * @tstamp_ready: contents of the Tx memory status register
- *
- * E810 devices do not use a Tx memory status register. Instead simply
- * indicate that all timestamps are currently ready.
- */
-static int
-ice_get_phy_tx_tstamp_ready_e810(struct ice_hw *hw, u8 port, u64 *tstamp_ready)
-{
- *tstamp_ready = 0xFFFFFFFFFFFFFFFF;
- return 0;
-}
-
-/* E810T SMA functions
- *
- * The following functions operate specifically on E810T hardware and are used
- * to access the extended GPIOs available.
- */
-
-/**
- * ice_get_pca9575_handle
+ * ice_get_pf_c827_idx - find and return the C827 index for the current pf
* @hw: pointer to the hw struct
- * @pca9575_handle: GPIO controller's handle
- *
- * Find and return the GPIO controller's handle in the netlist.
- * When found - the value will be cached in the hw structure and following calls
- * will return cached value
+ * @idx: index of the found C827 PHY
+ * Return:
+ * * 0 - success
+ * * negative - failure
*/
-static int
-ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
+static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx)
{
- struct ice_aqc_get_link_topo *cmd;
- struct ice_aq_desc desc;
+ struct ice_aqc_get_link_topo cmd;
+ u8 node_part_number;
+ u16 node_handle;
int status;
- u8 idx;
+ u8 ctx;
- /* If handle was read previously return cached value */
- if (hw->io_expander_handle) {
- *pca9575_handle = hw->io_expander_handle;
+ if (hw->mac_type != ICE_MAC_E810)
+ return -ENODEV;
+
+ if (hw->device_id != ICE_DEV_ID_E810C_QSFP) {
+ *idx = C827_0;
return 0;
}
- /* If handle was not detected read it from the netlist */
- cmd = &desc.params.get_link_topo;
- ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+ memset(&cmd, 0, sizeof(cmd));
- /* Set node type to GPIO controller */
- cmd->addr.topo_params.node_type_ctx =
- (ICE_AQC_LINK_TOPO_NODE_TYPE_M &
- ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);
+ ctx = ICE_AQC_LINK_TOPO_NODE_TYPE_PHY << ICE_AQC_LINK_TOPO_NODE_TYPE_S;
+ ctx |= ICE_AQC_LINK_TOPO_NODE_CTX_PORT << ICE_AQC_LINK_TOPO_NODE_CTX_S;
+ cmd.addr.topo_params.node_type_ctx = ctx;
-#define SW_PCA9575_SFP_TOPO_IDX 2
-#define SW_PCA9575_QSFP_TOPO_IDX 1
+ status = ice_aq_get_netlist_node(hw, &cmd, &node_part_number,
+ &node_handle);
+ if (status || node_part_number != ICE_AQC_GET_LINK_TOPO_NODE_NR_C827)
+ return -ENOENT;
- /* Check if the SW IO expander controlling SMA exists in the netlist. */
- if (hw->device_id == ICE_DEV_ID_E810C_SFP)
- idx = SW_PCA9575_SFP_TOPO_IDX;
- else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
- idx = SW_PCA9575_QSFP_TOPO_IDX;
+ if (node_handle == E810C_QSFP_C827_0_HANDLE)
+ *idx = C827_0;
+ else if (node_handle == E810C_QSFP_C827_1_HANDLE)
+ *idx = C827_1;
else
- return -EOPNOTSUPP;
+ return -EIO;
- cmd->addr.topo_params.index = idx;
+ return 0;
+}
- status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
- if (status)
- return -EOPNOTSUPP;
+/**
+ * ice_ptp_reset_ts_memory - Reset timestamp memory for all blocks
+ * @hw: pointer to the HW struct
+ */
+void ice_ptp_reset_ts_memory(struct ice_hw *hw)
+{
+ switch (hw->phy_model) {
+ case ICE_PHY_E822:
+ ice_ptp_reset_ts_memory_e822(hw);
+ break;
+ case ICE_PHY_E810:
+ default:
+ return;
+ }
+}
- /* Verify if we found the right IO expander type */
- if (desc.params.get_link_topo.node_part_num !=
- ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
- return -EOPNOTSUPP;
+/**
+ * ice_ptp_init_phc - Initialize PTP hardware clock
+ * @hw: pointer to the HW struct
+ *
+ * Perform the steps required to initialize the PTP hardware clock.
+ */
+int ice_ptp_init_phc(struct ice_hw *hw)
+{
+ u8 src_idx = hw->func_caps.ts_func_info.tmr_index_owned;
- /* If present save the handle and return it */
- hw->io_expander_handle =
- le16_to_cpu(desc.params.get_link_topo.addr.handle);
- *pca9575_handle = hw->io_expander_handle;
+ /* Enable source clocks */
+ wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M);
- return 0;
+ /* Clear event err indications for auxiliary pins */
+ (void)rd32(hw, GLTSYN_STAT(src_idx));
+
+ switch (hw->phy_model) {
+ case ICE_PHY_E810:
+ return ice_ptp_init_phc_e810(hw);
+ case ICE_PHY_E822:
+ return ice_ptp_init_phc_e822(hw);
+ default:
+ return -EOPNOTSUPP;
+ }
}
/**
- * ice_read_sma_ctrl_e810t
+ * ice_get_phy_tx_tstamp_ready - Read PHY Tx memory status indication
+ * @hw: pointer to the HW struct
+ * @block: the timestamp block to check
+ * @tstamp_ready: storage for the PHY Tx memory status information
+ *
+ * Check the PHY for Tx timestamp memory status. This reports a 64 bit value
+ * which indicates which timestamps in the block may be captured. A set bit
+ * means the timestamp can be read. An unset bit means the timestamp is not
+ * ready and software should avoid reading the register.
+ */
+int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready)
+{
+ switch (hw->phy_model) {
+ case ICE_PHY_E810:
+ return ice_get_phy_tx_tstamp_ready_e810(hw, block,
+ tstamp_ready);
+ case ICE_PHY_E822:
+ return ice_get_phy_tx_tstamp_ready_e822(hw, block,
+ tstamp_ready);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
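The returned bitmap is meant to gate every per-index read: a typical consumer scans the set bits and reads each ready timestamp, which on E822 also clears its valid bit as described earlier. A minimal sketch of that loop (assuming kernel context; bitmap_from_u64() and for_each_set_bit() are the standard <linux/bitmap.h>/<linux/bitops.h> helpers):

static int example_drain_tstamps(struct ice_hw *hw, u8 block)
{
        DECLARE_BITMAP(ready, 64);
        u64 tstamp_ready, tstamp;
        unsigned int idx;
        int err;

        err = ice_get_phy_tx_tstamp_ready(hw, block, &tstamp_ready);
        if (err)
                return err;

        bitmap_from_u64(ready, tstamp_ready);
        for_each_set_bit(idx, ready, 64) {
                /* Only touch indices whose ready bit is set; on E822 the
                 * read itself clears the valid bit, while E810 uses a
                 * separate ice_clear_phy_tstamp() call to discard a slot.
                 */
                err = ice_read_phy_tstamp(hw, block, idx, &tstamp);
                if (err)
                        return err;
                /* ... hand tstamp off for extension and reporting ... */
        }

        return 0;
}
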
+/**
+ * ice_cgu_get_pin_desc_e823 - get pin description array
* @hw: pointer to the hw struct
- * @data: pointer to data to be read from the GPIO controller
+ * @input: whether the request is for input (true) or output (false) pins
+ * @size: number of inputs/outputs
*
- * Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the
- * PCA9575 expander, so only bits 3-7 in data are valid.
+ * Return: pointer to the pin description array associated with the given hw.
*/
-int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data)
+static const struct ice_cgu_pin_desc *
+ice_cgu_get_pin_desc_e823(struct ice_hw *hw, bool input, int *size)
{
- int status;
- u16 handle;
- u8 i;
+ static const struct ice_cgu_pin_desc *t;
- status = ice_get_pca9575_handle(hw, &handle);
- if (status)
- return status;
+ if (hw->cgu_part_number ==
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032) {
+ if (input) {
+ t = ice_e823_zl_cgu_inputs;
+ *size = ARRAY_SIZE(ice_e823_zl_cgu_inputs);
+ } else {
+ t = ice_e823_zl_cgu_outputs;
+ *size = ARRAY_SIZE(ice_e823_zl_cgu_outputs);
+ }
+ } else if (hw->cgu_part_number ==
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384) {
+ if (input) {
+ t = ice_e823_si_cgu_inputs;
+ *size = ARRAY_SIZE(ice_e823_si_cgu_inputs);
+ } else {
+ t = ice_e823_si_cgu_outputs;
+ *size = ARRAY_SIZE(ice_e823_si_cgu_outputs);
+ }
+ } else {
+ t = NULL;
+ *size = 0;
+ }
- *data = 0;
+ return t;
+}
- for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
- bool pin;
+/**
+ * ice_cgu_get_pin_desc - get pin description array
+ * @hw: pointer to the hw struct
+ * @input: whether the request is for input (true) or output (false) pins
+ * @size: size of array returned by function
+ *
+ * Return: pointer to the pin description array associated with the given hw.
+ */
+static const struct ice_cgu_pin_desc *
+ice_cgu_get_pin_desc(struct ice_hw *hw, bool input, int *size)
+{
+ const struct ice_cgu_pin_desc *t = NULL;
- status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
- &pin, NULL);
- if (status)
- break;
- *data |= (u8)(!pin) << i;
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E810C_SFP:
+ if (input) {
+ t = ice_e810t_sfp_cgu_inputs;
+ *size = ARRAY_SIZE(ice_e810t_sfp_cgu_inputs);
+ } else {
+ t = ice_e810t_sfp_cgu_outputs;
+ *size = ARRAY_SIZE(ice_e810t_sfp_cgu_outputs);
+ }
+ break;
+ case ICE_DEV_ID_E810C_QSFP:
+ if (input) {
+ t = ice_e810t_qsfp_cgu_inputs;
+ *size = ARRAY_SIZE(ice_e810t_qsfp_cgu_inputs);
+ } else {
+ t = ice_e810t_qsfp_cgu_outputs;
+ *size = ARRAY_SIZE(ice_e810t_qsfp_cgu_outputs);
+ }
+ break;
+ case ICE_DEV_ID_E823L_10G_BASE_T:
+ case ICE_DEV_ID_E823L_1GBE:
+ case ICE_DEV_ID_E823L_BACKPLANE:
+ case ICE_DEV_ID_E823L_QSFP:
+ case ICE_DEV_ID_E823L_SFP:
+ case ICE_DEV_ID_E823C_10G_BASE_T:
+ case ICE_DEV_ID_E823C_BACKPLANE:
+ case ICE_DEV_ID_E823C_QSFP:
+ case ICE_DEV_ID_E823C_SFP:
+ case ICE_DEV_ID_E823C_SGMII:
+ t = ice_cgu_get_pin_desc_e823(hw, input, size);
+ break;
+ default:
+ break;
}
- return status;
+ return t;
}
/**
- * ice_write_sma_ctrl_e810t
+ * ice_cgu_get_pin_type - get pin's type
* @hw: pointer to the hw struct
- * @data: data to be written to the GPIO controller
+ * @pin: pin index
+ * @input: if request is done against input or output pin
*
- * Write the data to the SMA controller. It is connected to pins 3-7 of Port 1
- * of the PCA9575 expander, so only bits 3-7 in data are valid.
+ * Return: type of a pin.
*/
-int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
+enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input)
{
- int status;
- u16 handle;
- u8 i;
+ const struct ice_cgu_pin_desc *t;
+ int t_size;
- status = ice_get_pca9575_handle(hw, &handle);
- if (status)
- return status;
+ t = ice_cgu_get_pin_desc(hw, input, &t_size);
- for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
- bool pin;
+ if (!t)
+ return 0;
- pin = !(data & (1 << i));
- status = ice_aq_set_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
- pin, NULL);
- if (status)
- break;
- }
+ if (pin >= t_size)
+ return 0;
- return status;
+ return t[pin].type;
}
/**
- * ice_read_pca9575_reg_e810t
+ * ice_cgu_get_pin_freq_supp - get pin's supported frequency
* @hw: pointer to the hw struct
- * @offset: GPIO controller register offset
- * @data: pointer to data to be read from the GPIO controller
+ * @pin: pin index
+ * @input: if request is done against input or output pin
+ * @num: output number of supported frequencies
*
- * Read the register from the GPIO controller
+ * Get the number of supported frequencies and the array describing them.
+ *
+ * Return: array of supported frequencies for given pin.
*/
-int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data)
+struct dpll_pin_frequency *
+ice_cgu_get_pin_freq_supp(struct ice_hw *hw, u8 pin, bool input, u8 *num)
{
- struct ice_aqc_link_topo_addr link_topo;
- __le16 addr;
- u16 handle;
- int err;
+ const struct ice_cgu_pin_desc *t;
+ int t_size;
- memset(&link_topo, 0, sizeof(link_topo));
-
- err = ice_get_pca9575_handle(hw, &handle);
- if (err)
- return err;
+ *num = 0;
+ t = ice_cgu_get_pin_desc(hw, input, &t_size);
+ if (!t)
+ return NULL;
+ if (pin >= t_size)
+ return NULL;
+ *num = t[pin].freq_supp_num;
- link_topo.handle = cpu_to_le16(handle);
- link_topo.topo_params.node_type_ctx =
- FIELD_PREP(ICE_AQC_LINK_TOPO_NODE_CTX_M,
- ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED);
-
- addr = cpu_to_le16((u16)offset);
-
- return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL);
+ return t[pin].freq_supp;
}
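A sketch of enumerating a pin's supported frequencies with the helper above; the min/max fields belong to struct dpll_pin_frequency from <linux/dpll.h>:

static void example_dump_input_pin_freqs(struct ice_hw *hw, u8 pin)
{
	struct dpll_pin_frequency *freqs;
	u8 num, i;

	freqs = ice_cgu_get_pin_freq_supp(hw, pin, true, &num);
	if (!freqs)
		return;

	for (i = 0; i < num; i++)
		pr_info("input pin %u supports %llu-%llu Hz\n",
			pin, freqs[i].min, freqs[i].max);
}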
/**
- * ice_is_pca9575_present
+ * ice_cgu_get_pin_name - get pin's name
* @hw: pointer to the hw struct
+ * @pin: pin index
+ * @input: if request is done against input or output pin
*
- * Check if the SW IO expander is present in the netlist
+ * Return:
+ * * null-terminated char array with the pin name
+ * * NULL in case of failure
*/
-bool ice_is_pca9575_present(struct ice_hw *hw)
+const char *ice_cgu_get_pin_name(struct ice_hw *hw, u8 pin, bool input)
{
- u16 handle = 0;
- int status;
+ const struct ice_cgu_pin_desc *t;
+ int t_size;
- if (!ice_is_e810t(hw))
- return false;
+ t = ice_cgu_get_pin_desc(hw, input, &t_size);
- status = ice_get_pca9575_handle(hw, &handle);
+ if (!t)
+ return NULL;
- return !status && handle;
+ if (pin >= t_size)
+ return NULL;
+
+ return t[pin].name;
}
/**
- * ice_ptp_reset_ts_memory - Reset timestamp memory for all blocks
- * @hw: pointer to the HW struct
- */
-void ice_ptp_reset_ts_memory(struct ice_hw *hw)
-{
- if (ice_is_e810(hw))
- return;
+ * ice_get_cgu_state - get the state of the DPLL
+ * @hw: pointer to the hw struct
+ * @dpll_idx: Index of internal DPLL unit
+ * @last_dpll_state: last known state of DPLL
+ * @pin: pointer to a buffer for returning currently active pin
+ * @ref_state: reference clock state
+ * @eec_mode: eec mode of the DPLL
+ * @phase_offset: pointer to a buffer for returning phase offset
+ * @dpll_state: state of the DPLL (output)
+ *
+ * This function will read the state of the DPLL (dpll_idx). The non-NULL
+ * 'pin', 'ref_state', 'eec_mode' and 'phase_offset' parameters are used to
+ * retrieve the currently active pin, reference state, EEC mode and phase
+ * offset respectively.
+ *
+ * Return: 0 on success, negative error code otherwise; the DPLL state is
+ * returned through @dpll_state
+ */
+int ice_get_cgu_state(struct ice_hw *hw, u8 dpll_idx,
+ enum dpll_lock_status last_dpll_state, u8 *pin,
+ u8 *ref_state, u8 *eec_mode, s64 *phase_offset,
+ enum dpll_lock_status *dpll_state)
+{
+ u8 hw_ref_state, hw_dpll_state, hw_eec_mode, hw_config;
+ s64 hw_phase_offset;
+ int status;
+
+ status = ice_aq_get_cgu_dpll_status(hw, dpll_idx, &hw_ref_state,
+ &hw_dpll_state, &hw_config,
+ &hw_phase_offset, &hw_eec_mode);
+ if (status)
+ return status;
+
+ if (pin)
+ /* current ref pin in dpll_state_refsel_status_X register */
+ *pin = hw_config & ICE_AQC_GET_CGU_DPLL_CONFIG_CLK_REF_SEL;
+ if (phase_offset)
+ *phase_offset = hw_phase_offset;
+ if (ref_state)
+ *ref_state = hw_ref_state;
+ if (eec_mode)
+ *eec_mode = hw_eec_mode;
+ if (!dpll_state)
+ return 0;
- ice_ptp_reset_ts_memory_e822(hw);
+	/* According to the ZL DPLL documentation, once the state reaches
+	 * LOCKED_HO_ACQ it will never return to FREERUN. This aligns with the
+	 * ITU-T G.781 Recommendation. The hardware cannot report HOLDOVER
+	 * itself, as the HO memory is cleared while switching to another
+	 * reference, so HOLDOVER is inferred from the last known state.
+	 * Only when the previous state was "LOCKED without HO_ACQ" or already
+	 * unlocked do we actually fall back to FREERUN.
+	 */
+ if (hw_dpll_state & ICE_AQC_GET_CGU_DPLL_STATUS_STATE_LOCK) {
+ if (hw_dpll_state & ICE_AQC_GET_CGU_DPLL_STATUS_STATE_HO_READY)
+ *dpll_state = DPLL_LOCK_STATUS_LOCKED_HO_ACQ;
+ else
+ *dpll_state = DPLL_LOCK_STATUS_LOCKED;
+ } else if (last_dpll_state == DPLL_LOCK_STATUS_LOCKED_HO_ACQ ||
+ last_dpll_state == DPLL_LOCK_STATUS_HOLDOVER) {
+ *dpll_state = DPLL_LOCK_STATUS_HOLDOVER;
+ } else {
+ *dpll_state = DPLL_LOCK_STATUS_UNLOCKED;
+ }
+
+ return 0;
}
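Since HOLDOVER is inferred from history rather than read from hardware, a caller is expected to feed the previously reported state back in on every poll; a minimal sketch:

static void example_poll_dpll(struct ice_hw *hw, u8 dpll_idx,
			      enum dpll_lock_status *last)
{
	enum dpll_lock_status state;

	if (ice_get_cgu_state(hw, dpll_idx, *last, NULL, NULL, NULL,
			      NULL, &state))
		return;

	*last = state;	/* remembered for the next poll cycle */
}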
/**
- * ice_ptp_init_phc - Initialize PTP hardware clock
- * @hw: pointer to the HW struct
+ * ice_get_cgu_rclk_pin_info - get info on available recovered clock pins
+ * @hw: pointer to the hw struct
+ * @base_idx: returns index of first recovered clock pin on device
+ * @pin_num: returns number of recovered clock pins available on device
*
- * Perform the steps required to initialize the PTP hardware clock.
+ * Based on hw, provide the caller with info about the recovered clock pins
+ * available on the board.
+ *
+ * Return:
+ * * 0 - success, information is valid
+ * * negative - failure, information is not valid
*/
-int ice_ptp_init_phc(struct ice_hw *hw)
+int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num)
{
- u8 src_idx = hw->func_caps.ts_func_info.tmr_index_owned;
+ u8 phy_idx;
+ int ret;
- /* Enable source clocks */
- wr32(hw, GLTSYN_ENA(src_idx), GLTSYN_ENA_TSYN_ENA_M);
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E810C_SFP:
+ case ICE_DEV_ID_E810C_QSFP:
- /* Clear event err indications for auxiliary pins */
- (void)rd32(hw, GLTSYN_STAT(src_idx));
+ ret = ice_get_pf_c827_idx(hw, &phy_idx);
+ if (ret)
+ return ret;
+ *base_idx = E810T_CGU_INPUT_C827(phy_idx, ICE_RCLKA_PIN);
+ *pin_num = ICE_E810_RCLK_PINS_NUM;
+ ret = 0;
+ break;
+ case ICE_DEV_ID_E823L_10G_BASE_T:
+ case ICE_DEV_ID_E823L_1GBE:
+ case ICE_DEV_ID_E823L_BACKPLANE:
+ case ICE_DEV_ID_E823L_QSFP:
+ case ICE_DEV_ID_E823L_SFP:
+ case ICE_DEV_ID_E823C_10G_BASE_T:
+ case ICE_DEV_ID_E823C_BACKPLANE:
+ case ICE_DEV_ID_E823C_QSFP:
+ case ICE_DEV_ID_E823C_SFP:
+ case ICE_DEV_ID_E823C_SGMII:
+ *pin_num = ICE_E822_RCLK_PINS_NUM;
+ ret = 0;
+ if (hw->cgu_part_number ==
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032)
+ *base_idx = ZL_REF1P;
+ else if (hw->cgu_part_number ==
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384)
+ *base_idx = SI_REF1P;
+ else
+ ret = -ENODEV;
- if (ice_is_e810(hw))
- return ice_ptp_init_phc_e810(hw);
- else
- return ice_ptp_init_phc_e822(hw);
+ break;
+ default:
+ ret = -ENODEV;
+ break;
+ }
+
+ return ret;
}
/**
- * ice_get_phy_tx_tstamp_ready - Read PHY Tx memory status indication
- * @hw: pointer to the HW struct
- * @block: the timestamp block to check
- * @tstamp_ready: storage for the PHY Tx memory status information
+ * ice_cgu_get_output_pin_state_caps - get output pin state capabilities
+ * @hw: pointer to the hw struct
+ * @pin_id: id of a pin
+ * @caps: capabilities to modify
*
- * Check the PHY for Tx timestamp memory status. This reports a 64 bit value
- * which indicates which timestamps in the block may be captured. A set bit
- * means the timestamp can be read. An unset bit means the timestamp is not
- * ready and software should avoid reading the register.
+ * Return:
+ * * 0 - success, state capabilities were modified
+ * * negative - failure, capabilities were not modified
*/
-int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready)
+int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
+ unsigned long *caps)
{
- if (ice_is_e810(hw))
- return ice_get_phy_tx_tstamp_ready_e810(hw, block,
- tstamp_ready);
+ bool can_change = true;
+
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E810C_SFP:
+ if (pin_id == ZL_OUT2 || pin_id == ZL_OUT3)
+ can_change = false;
+ break;
+ case ICE_DEV_ID_E810C_QSFP:
+ if (pin_id == ZL_OUT2 || pin_id == ZL_OUT3 || pin_id == ZL_OUT4)
+ can_change = false;
+ break;
+ case ICE_DEV_ID_E823L_10G_BASE_T:
+ case ICE_DEV_ID_E823L_1GBE:
+ case ICE_DEV_ID_E823L_BACKPLANE:
+ case ICE_DEV_ID_E823L_QSFP:
+ case ICE_DEV_ID_E823L_SFP:
+ case ICE_DEV_ID_E823C_10G_BASE_T:
+ case ICE_DEV_ID_E823C_BACKPLANE:
+ case ICE_DEV_ID_E823C_QSFP:
+ case ICE_DEV_ID_E823C_SFP:
+ case ICE_DEV_ID_E823C_SGMII:
+ if (hw->cgu_part_number ==
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032 &&
+ pin_id == ZL_OUT2)
+ can_change = false;
+ else if (hw->cgu_part_number ==
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384 &&
+ pin_id == SI_OUT1)
+ can_change = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (can_change)
+ *caps |= DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
else
- return ice_get_phy_tx_tstamp_ready_e822(hw, block,
- tstamp_ready);
+ *caps &= ~DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
+
+ return 0;
}
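A hedged sketch of a caller testing the capability bit set above; DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE comes from the uapi dpll header:

static bool example_pin_state_is_mutable(struct ice_hw *hw, u8 pin_id)
{
	unsigned long caps = 0;

	if (ice_cgu_get_output_pin_state_caps(hw, pin_id, &caps))
		return false;

	return caps & DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
}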
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 3b68cb91bd81..cf76701566c7 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -3,13 +3,15 @@
#ifndef _ICE_PTP_HW_H_
#define _ICE_PTP_HW_H_
+#include <linux/dpll.h>
enum ice_ptp_tmr_cmd {
- INIT_TIME,
- INIT_INCVAL,
- ADJ_TIME,
- ADJ_TIME_AT_TIME,
- READ_TIME
+ ICE_PTP_INIT_TIME,
+ ICE_PTP_INIT_INCVAL,
+ ICE_PTP_ADJ_TIME,
+ ICE_PTP_ADJ_TIME_AT_TIME,
+ ICE_PTP_READ_TIME,
+ ICE_PTP_NOP,
};
enum ice_ptp_serdes {
@@ -109,9 +111,83 @@ struct ice_cgu_pll_params_e822 {
u32 post_pll_div;
};
+#define E810C_QSFP_C827_0_HANDLE 2
+#define E810C_QSFP_C827_1_HANDLE 3
+enum ice_e810_c827_idx {
+ C827_0,
+ C827_1
+};
+
+enum ice_phy_rclk_pins {
+ ICE_RCLKA_PIN = 0, /* SCL pin */
+ ICE_RCLKB_PIN, /* SDA pin */
+};
+
+#define ICE_E810_RCLK_PINS_NUM (ICE_RCLKB_PIN + 1)
+#define ICE_E822_RCLK_PINS_NUM (ICE_RCLKA_PIN + 1)
+#define E810T_CGU_INPUT_C827(_phy, _pin) ((_phy) * ICE_E810_RCLK_PINS_NUM + \
+ (_pin) + ZL_REF1P)
+
+enum ice_zl_cgu_in_pins {
+ ZL_REF0P = 0,
+ ZL_REF0N,
+ ZL_REF1P,
+ ZL_REF1N,
+ ZL_REF2P,
+ ZL_REF2N,
+ ZL_REF3P,
+ ZL_REF3N,
+ ZL_REF4P,
+ ZL_REF4N,
+ NUM_ZL_CGU_INPUT_PINS
+};
+
+enum ice_zl_cgu_out_pins {
+ ZL_OUT0 = 0,
+ ZL_OUT1,
+ ZL_OUT2,
+ ZL_OUT3,
+ ZL_OUT4,
+ ZL_OUT5,
+ ZL_OUT6,
+ NUM_ZL_CGU_OUTPUT_PINS
+};
+
+enum ice_si_cgu_in_pins {
+ SI_REF0P = 0,
+ SI_REF0N,
+ SI_REF1P,
+ SI_REF1N,
+ SI_REF2P,
+ SI_REF2N,
+ SI_REF3,
+ SI_REF4,
+ NUM_SI_CGU_INPUT_PINS
+};
+
+enum ice_si_cgu_out_pins {
+ SI_OUT0 = 0,
+ SI_OUT1,
+ SI_OUT2,
+ SI_OUT3,
+ SI_OUT4,
+ NUM_SI_CGU_OUTPUT_PINS
+};
+
+struct ice_cgu_pin_desc {
+ char *name;
+ u8 index;
+ enum dpll_pin_type type;
+ u32 freq_supp_num;
+ struct dpll_pin_frequency *freq_supp;
+};
+
extern const struct
ice_cgu_pll_params_e822 e822_cgu_params[NUM_ICE_TIME_REF_FREQ];
+
/* Table of constants related to possible TIME_REF sources */
extern const struct ice_time_ref_info_e822 e822_time_ref[NUM_ICE_TIME_REF_FREQ];
@@ -127,6 +203,7 @@ extern const struct ice_vernier_info_e822 e822_vernier[NUM_ICE_PTP_LNK_SPD];
u8 ice_get_ptp_src_clock_index(struct ice_hw *hw);
bool ice_ptp_lock(struct ice_hw *hw);
void ice_ptp_unlock(struct ice_hw *hw);
+void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd);
int ice_ptp_init_time(struct ice_hw *hw, u64 time);
int ice_ptp_write_incval(struct ice_hw *hw, u64 incval);
int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval);
@@ -138,11 +215,8 @@ int ice_ptp_init_phc(struct ice_hw *hw);
int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready);
/* E822 family functions */
-int ice_read_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 *val);
-int ice_write_phy_reg_e822(struct ice_hw *hw, u8 port, u16 offset, u32 val);
int ice_read_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 *val);
int ice_write_quad_reg_e822(struct ice_hw *hw, u8 quad, u16 offset, u32 val);
-int ice_ptp_prep_port_adj_e822(struct ice_hw *hw, u8 port, s64 time);
void ice_ptp_reset_ts_memory_quad_e822(struct ice_hw *hw, u8 quad);
/**
@@ -197,6 +271,19 @@ int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data);
int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data);
int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data);
bool ice_is_pca9575_present(struct ice_hw *hw);
+enum dpll_pin_type ice_cgu_get_pin_type(struct ice_hw *hw, u8 pin, bool input);
+struct dpll_pin_frequency *
+ice_cgu_get_pin_freq_supp(struct ice_hw *hw, u8 pin, bool input, u8 *num);
+const char *ice_cgu_get_pin_name(struct ice_hw *hw, u8 pin, bool input);
+int ice_get_cgu_state(struct ice_hw *hw, u8 dpll_idx,
+ enum dpll_lock_status last_dpll_state, u8 *pin,
+ u8 *ref_state, u8 *eec_mode, s64 *phase_offset,
+ enum dpll_lock_status *dpll_state);
+int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num);
+
+void ice_ptp_init_phy_model(struct ice_hw *hw);
+int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
+ unsigned long *caps);
#define PFTSYN_SEM_BYTES 4
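For illustration, E810T_CGU_INPUT_C827(C827_1, ICE_RCLKB_PIN) above evaluates to 1 * ICE_E810_RCLK_PINS_NUM + 1 + ZL_REF1P = 5, i.e. ZL_REF2N. An invented example of the pin description tables that struct ice_cgu_pin_desc shapes; the real tables (ice_e810t_sfp_cgu_inputs, ice_e823_zl_cgu_inputs, ...) live in ice_ptp_hw.c and the frequency macros come from <linux/dpll.h>:

static struct dpll_pin_frequency example_input_freqs[] = {
	DPLL_PIN_FREQUENCY_1PPS,
	DPLL_PIN_FREQUENCY_10MHZ,
};

static const struct ice_cgu_pin_desc example_zl_inputs[] = {
	{ "REF0P", ZL_REF0P, DPLL_PIN_TYPE_EXT,
	  ARRAY_SIZE(example_input_freqs), example_input_freqs },
	{ "REF0N", ZL_REF0N, DPLL_PIN_TYPE_EXT,
	  ARRAY_SIZE(example_input_freqs), example_input_freqs },
};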
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index e30e12321abd..c686ac0935eb 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -254,7 +254,7 @@ static const struct net_device_ops ice_repr_netdev_ops = {
* ice_is_port_repr_netdev - Check if a given netdevice is a port representor netdev
* @netdev: pointer to netdev
*/
-bool ice_is_port_repr_netdev(struct net_device *netdev)
+bool ice_is_port_repr_netdev(const struct net_device *netdev)
{
return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h
index 9c2a6f496b3b..e1ee2d2c1d2d 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.h
+++ b/drivers/net/ethernet/intel/ice/ice_repr.h
@@ -12,6 +12,7 @@ struct ice_repr {
struct ice_q_vector *q_vector;
struct net_device *netdev;
struct metadata_dst *dst;
+ struct ice_esw_br_port *br_port;
#ifdef CONFIG_ICE_SWITCHDEV
/* info about slow path rule */
struct ice_rule_query_data sp_rule;
@@ -27,5 +28,5 @@ void ice_repr_stop_tx_queues(struct ice_repr *repr);
void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi);
struct ice_repr *ice_netdev_to_repr(struct net_device *netdev);
-bool ice_is_port_repr_netdev(struct net_device *netdev);
+bool ice_is_port_repr_netdev(const struct net_device *netdev);
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index b664d60fd037..2f4a621254e8 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -229,29 +229,22 @@ ice_aq_delete_sched_elems(struct ice_hw *hw, u16 grps_req,
* ice_sched_remove_elems - remove nodes from HW
* @hw: pointer to the HW struct
* @parent: pointer to the parent node
- * @num_nodes: number of nodes
- * @node_teids: array of node teids to be deleted
+ * @node_teid: node teid to be deleted
*
* This function removes nodes from HW
*/
static int
ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
- u16 num_nodes, u32 *node_teids)
+ u32 node_teid)
{
- struct ice_aqc_delete_elem *buf;
- u16 i, num_groups_removed = 0;
- u16 buf_size;
+ DEFINE_FLEX(struct ice_aqc_delete_elem, buf, teid, 1);
+ u16 buf_size = __struct_size(buf);
+ u16 num_groups_removed = 0;
int status;
- buf_size = struct_size(buf, teid, num_nodes);
- buf = devm_kzalloc(ice_hw_to_dev(hw), buf_size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
buf->hdr.parent_teid = parent->info.node_teid;
- buf->hdr.num_elems = cpu_to_le16(num_nodes);
- for (i = 0; i < num_nodes; i++)
- buf->teid[i] = cpu_to_le32(node_teids[i]);
+ buf->hdr.num_elems = cpu_to_le16(1);
+ buf->teid[0] = cpu_to_le32(node_teid);
status = ice_aq_delete_sched_elems(hw, 1, buf, buf_size,
&num_groups_removed, NULL);
@@ -259,7 +252,6 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
ice_debug(hw, ICE_DBG_SCHED, "remove node failed FW error %d\n",
hw->adminq.sq_last_status);
- devm_kfree(ice_hw_to_dev(hw), buf);
return status;
}
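The DEFINE_FLEX conversion above (repeated in several hunks below) swaps a heap-allocated flexible-array struct for an on-stack buffer sized for a fixed member count, dropping the kzalloc()/kfree() pair and the -ENOMEM path. A self-contained sketch of the pattern with invented names; DEFINE_FLEX and __struct_size come from the kernel's overflow/fortify helpers:

struct example_cmd {
	__le16 num_elems;
	__le32 teid[];			/* flexible array member */
};

static int example_send(struct ice_hw *hw, u32 teid)
{
	DEFINE_FLEX(struct example_cmd, buf, teid, 1);
	u16 buf_size = __struct_size(buf);	/* size for exactly 1 teid */

	buf->num_elems = cpu_to_le16(1);
	buf->teid[0] = cpu_to_le32(teid);
	/* buf lives on the stack; nothing to allocate or free */
	return example_aq_send(hw, buf, buf_size);	/* hypothetical */
}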
@@ -326,7 +318,7 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
node->info.data.elem_type != ICE_AQC_ELEM_TYPE_LEAF) {
u32 teid = le32_to_cpu(node->info.node_teid);
- ice_sched_remove_elems(hw, node->parent, 1, &teid);
+ ice_sched_remove_elems(hw, node->parent, teid);
}
parent = node->parent;
/* root has no parent */
@@ -437,24 +429,20 @@ ice_aq_cfg_sched_elems(struct ice_hw *hw, u16 elems_req,
}
/**
- * ice_aq_move_sched_elems - move scheduler elements
+ * ice_aq_move_sched_elems - move scheduler elements (one group at a time)
* @hw: pointer to the HW struct
- * @grps_req: number of groups to move
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
* @grps_movd: returns total number of groups moved
- * @cd: pointer to command details structure or NULL
*
* Move scheduling elements (0x0408)
*/
-static int
-ice_aq_move_sched_elems(struct ice_hw *hw, u16 grps_req,
- struct ice_aqc_move_elem *buf, u16 buf_size,
- u16 *grps_movd, struct ice_sq_cd *cd)
+int
+ice_aq_move_sched_elems(struct ice_hw *hw, struct ice_aqc_move_elem *buf,
+ u16 buf_size, u16 *grps_movd)
{
return ice_aqc_send_sched_elem_cmd(hw, ice_aqc_opc_move_sched_elems,
- grps_req, (void *)buf, buf_size,
- grps_movd, cd);
+ 1, buf, buf_size, grps_movd, NULL);
}
/**
@@ -526,7 +514,7 @@ ice_aq_query_sched_res(struct ice_hw *hw, u16 buf_size,
*
* This function suspends or resumes HW nodes
*/
-static int
+int
ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
bool suspend)
{
@@ -569,18 +557,24 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
{
struct ice_vsi_ctx *vsi_ctx;
struct ice_q_ctx *q_ctx;
+ u16 idx;
vsi_ctx = ice_get_vsi_ctx(hw, vsi_handle);
if (!vsi_ctx)
return -EINVAL;
/* allocate LAN queue contexts */
if (!vsi_ctx->lan_q_ctx[tc]) {
- vsi_ctx->lan_q_ctx[tc] = devm_kcalloc(ice_hw_to_dev(hw),
- new_numqs,
- sizeof(*q_ctx),
- GFP_KERNEL);
- if (!vsi_ctx->lan_q_ctx[tc])
+ q_ctx = devm_kcalloc(ice_hw_to_dev(hw), new_numqs,
+ sizeof(*q_ctx), GFP_KERNEL);
+ if (!q_ctx)
return -ENOMEM;
+
+ for (idx = 0; idx < new_numqs; idx++) {
+ q_ctx[idx].q_handle = ICE_INVAL_Q_HANDLE;
+ q_ctx[idx].q_teid = ICE_INVAL_TEID;
+ }
+
+ vsi_ctx->lan_q_ctx[tc] = q_ctx;
vsi_ctx->num_lan_q_entries[tc] = new_numqs;
return 0;
}
@@ -592,9 +586,16 @@ ice_alloc_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 new_numqs)
sizeof(*q_ctx), GFP_KERNEL);
if (!q_ctx)
return -ENOMEM;
+
memcpy(q_ctx, vsi_ctx->lan_q_ctx[tc],
prev_num * sizeof(*q_ctx));
devm_kfree(ice_hw_to_dev(hw), vsi_ctx->lan_q_ctx[tc]);
+
+ for (idx = prev_num; idx < new_numqs; idx++) {
+ q_ctx[idx].q_handle = ICE_INVAL_Q_HANDLE;
+ q_ctx[idx].q_teid = ICE_INVAL_TEID;
+ }
+
vsi_ctx->lan_q_ctx[tc] = q_ctx;
vsi_ctx->num_lan_q_entries[tc] = new_numqs;
}
@@ -1044,7 +1045,7 @@ ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
*
* This function add nodes to a given layer.
*/
-static int
+int
ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
struct ice_sched_node *tc_node,
struct ice_sched_node *parent, u8 layer,
@@ -1119,7 +1120,7 @@ static u8 ice_sched_get_qgrp_layer(struct ice_hw *hw)
*
* This function returns the current VSI layer number
*/
-static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
+u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
{
/* Num Layers VSI layer
* 9 6
@@ -1142,7 +1143,7 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
*
* This function returns the current aggregator layer number
*/
-static u8 ice_sched_get_agg_layer(struct ice_hw *hw)
+u8 ice_sched_get_agg_layer(struct ice_hw *hw)
{
/* Num Layers aggregator layer
* 9 4
@@ -1180,7 +1181,7 @@ static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
int status;
/* remove the default leaf node */
- status = ice_sched_remove_elems(pi->hw, node->parent, 1, &teid);
+ status = ice_sched_remove_elems(pi->hw, node->parent, teid);
if (!status)
ice_free_sched_node(pi, node);
}
@@ -1577,7 +1578,7 @@ ice_sched_get_vsi_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
* This function retrieves an aggregator node for a given aggregator ID from
* a given TC branch
*/
-static struct ice_sched_node *
+struct ice_sched_node *
ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
u32 agg_id)
{
@@ -2139,7 +2140,7 @@ ice_get_agg_info(struct ice_hw *hw, u32 agg_id)
* This function walks through the aggregator subtree to find a free parent
* node
*/
-static struct ice_sched_node *
+struct ice_sched_node *
ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node,
u16 *num_nodes)
{
@@ -2219,12 +2220,12 @@ int
ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
u16 num_items, u32 *list)
{
- struct ice_aqc_move_elem *buf;
+ DEFINE_FLEX(struct ice_aqc_move_elem, buf, teid, 1);
+ u16 buf_len = __struct_size(buf);
struct ice_sched_node *node;
u16 i, grps_movd = 0;
struct ice_hw *hw;
int status = 0;
- u16 buf_len;
hw = pi->hw;
@@ -2236,35 +2237,27 @@ ice_sched_move_nodes(struct ice_port_info *pi, struct ice_sched_node *parent,
hw->max_children[parent->tx_sched_layer])
return -ENOSPC;
- buf_len = struct_size(buf, teid, 1);
- buf = kzalloc(buf_len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
for (i = 0; i < num_items; i++) {
node = ice_sched_find_node_by_teid(pi->root, list[i]);
if (!node) {
status = -EINVAL;
- goto move_err_exit;
+ break;
}
buf->hdr.src_parent_teid = node->info.parent_teid;
buf->hdr.dest_parent_teid = parent->info.node_teid;
buf->teid[0] = node->info.node_teid;
buf->hdr.num_elems = cpu_to_le16(1);
- status = ice_aq_move_sched_elems(hw, 1, buf, buf_len,
- &grps_movd, NULL);
+ status = ice_aq_move_sched_elems(hw, buf, buf_len, &grps_movd);
if (status && grps_movd != 1) {
status = -EIO;
- goto move_err_exit;
+ break;
}
/* update the SW DB */
ice_sched_update_parent(parent, node);
}
-move_err_exit:
- kfree(buf);
return status;
}
@@ -3958,7 +3951,7 @@ ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,
* This function sets BW limit of VSI or Aggregator scheduling node
* based on TC information from passed in argument BW.
*/
-int
+static int
ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
enum ice_agg_type agg_type, u8 tc,
enum ice_rl_type rl_type, u32 bw)
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index 9c100747445a..1aef05ea5a57 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -141,13 +141,28 @@ ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
int
ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_rl_type rl_type);
-int
-ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
- enum ice_agg_type agg_type, u8 tc,
- enum ice_rl_type rl_type, u32 bw);
int ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);
+int
+ice_sched_suspend_resume_elems(struct ice_hw *hw, u8 num_nodes, u32 *node_teids,
+ bool suspend);
+struct ice_sched_node *
+ice_sched_get_agg_node(struct ice_port_info *pi, struct ice_sched_node *tc_node,
+ u32 agg_id);
+u8 ice_sched_get_agg_layer(struct ice_hw *hw);
+u8 ice_sched_get_vsi_layer(struct ice_hw *hw);
+struct ice_sched_node *
+ice_sched_get_free_vsi_parent(struct ice_hw *hw, struct ice_sched_node *node,
+ u16 *num_nodes);
+int
+ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
+ struct ice_sched_node *tc_node,
+ struct ice_sched_node *parent, u8 layer,
+ u16 num_nodes, u32 *first_node_teid,
+ u16 *num_nodes_added);
void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw);
void ice_sched_replay_agg(struct ice_hw *hw);
+int ice_aq_move_sched_elems(struct ice_hw *hw, struct ice_aqc_move_elem *buf,
+ u16 buf_size, u16 *grps_movd);
int ice_replay_vsi_agg(struct ice_hw *hw, u16 vsi_handle);
int ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
#endif /* _ICE_SCHED_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c
index 31314e7540f8..e1494f24f661 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.c
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -64,7 +64,7 @@ static void ice_free_vf_res(struct ice_vf *vf)
vf->num_mac = 0;
}
- last_vector_idx = vf->first_vector_idx + pf->vfs.num_msix_per - 1;
+ last_vector_idx = vf->first_vector_idx + vf->num_msix - 1;
/* clear VF MDD event information */
memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
@@ -102,7 +102,7 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);
first = vf->first_vector_idx;
- last = first + pf->vfs.num_msix_per - 1;
+ last = first + vf->num_msix - 1;
for (v = first; v <= last; v++) {
u32 reg;
@@ -138,6 +138,8 @@ static int ice_sriov_free_msix_res(struct ice_pf *pf)
if (!pf)
return -EINVAL;
+ bitmap_free(pf->sriov_irq_bm);
+ pf->sriov_irq_size = 0;
pf->sriov_base_vector = 0;
return 0;
@@ -244,22 +246,6 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
return vsi;
}
-/**
- * ice_calc_vf_first_vector_idx - Calculate MSIX vector index in the PF space
- * @pf: pointer to PF structure
- * @vf: pointer to VF that the first MSIX vector index is being calculated for
- *
- * This returns the first MSIX vector index in PF space that is used by this VF.
- * This index is used when accessing PF relative registers such as
- * GLINT_VECT2FUNC and GLINT_DYN_CTL.
- * This will always be the OICR index in the AVF driver so any functionality
- * using vf->first_vector_idx for queue configuration will have to increment by
- * 1 to avoid meddling with the OICR index.
- */
-static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
-{
- return pf->sriov_base_vector + vf->vf_id * pf->vfs.num_msix_per;
-}
/**
* ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
@@ -280,12 +266,12 @@ static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
hw = &pf->hw;
pf_based_first_msix = vf->first_vector_idx;
- pf_based_last_msix = (pf_based_first_msix + pf->vfs.num_msix_per) - 1;
+ pf_based_last_msix = (pf_based_first_msix + vf->num_msix) - 1;
device_based_first_msix = pf_based_first_msix +
pf->hw.func_caps.common_cap.msix_vector_first_id;
device_based_last_msix =
- (device_based_first_msix + pf->vfs.num_msix_per) - 1;
+ (device_based_first_msix + vf->num_msix) - 1;
device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
reg = (((device_based_first_msix << VPINT_ALLOC_FIRST_S) &
@@ -388,16 +374,11 @@ static void ice_ena_vf_mappings(struct ice_vf *vf)
*/
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
- struct ice_pf *pf;
-
if (!vf || !q_vector)
return -EINVAL;
- pf = vf->pf;
-
/* always add one to account for the OICR being the first MSIX */
- return pf->sriov_base_vector + pf->vfs.num_msix_per * vf->vf_id +
- q_vector->v_idx + 1;
+ return vf->first_vector_idx + q_vector->v_idx + 1;
}
/**
@@ -527,6 +508,52 @@ static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
}
/**
+ * ice_sriov_get_irqs - get irqs for SR-IOV use case
+ * @pf: pointer to PF structure
+ * @needed: number of irqs to get
+ *
+ * This returns the first MSI-X vector index in PF space that is used by this
+ * VF. This index is used when accessing PF relative registers such as
+ * GLINT_VECT2FUNC and GLINT_DYN_CTL. It will always be the OICR index in the
+ * AVF driver, so any functionality using vf->first_vector_idx for queue
+ * configuration will have to increment by 1 to avoid meddling with the OICR
+ * index.
+ *
+ * Only SR-IOV specific vectors are tracked in sriov_irq_bm. SR-IOV vectors
+ * are allocated from the end of the global irq index space: the first bit in
+ * sriov_irq_bm corresponds to the last irq index, and so on. This makes it
+ * simple to extend the SR-IOV vector space, as the vectors always occupy the
+ * range from sriov_base_vector up to the last irq index, and
+ * sriov_base_vector itself can be moved as that range grows or shrinks.
+ */
+static int ice_sriov_get_irqs(struct ice_pf *pf, u16 needed)
+{
+ int res = bitmap_find_next_zero_area(pf->sriov_irq_bm,
+ pf->sriov_irq_size, 0, needed, 0);
+ /* conversion from number in bitmap to global irq index */
+ int index = pf->sriov_irq_size - res - needed;
+
+ if (res >= pf->sriov_irq_size || index < pf->sriov_base_vector)
+ return -ENOENT;
+
+ bitmap_set(pf->sriov_irq_bm, res, needed);
+ return index;
+}
+
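A worked example of the reversed mapping above, with illustrative values: for sriov_irq_size = 1024 and needed = 8, a zero area found at bitmap offset res = 0 converts to index = 1024 - 0 - 8 = 1016, so the request occupies the top-most global irqs 1016..1023; a second 8-vector request finds res = 8 and maps to 1008..1015, growing downward toward sriov_base_vector.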
+/**
+ * ice_sriov_free_irqs - free irqs used by the VF
+ * @pf: pointer to PF structure
+ * @vf: pointer to VF structure
+ */
+static void ice_sriov_free_irqs(struct ice_pf *pf, struct ice_vf *vf)
+{
+ /* Move back from first vector index to first index in bitmap */
+ int bm_i = pf->sriov_irq_size - vf->first_vector_idx - vf->num_msix;
+
+ bitmap_clear(pf->sriov_irq_bm, bm_i, vf->num_msix);
+ vf->first_vector_idx = 0;
+}
+
+/**
* ice_init_vf_vsi_res - initialize/setup VF VSI resources
* @vf: VF to initialize/setup the VSI for
*
@@ -539,7 +566,9 @@ static int ice_init_vf_vsi_res(struct ice_vf *vf)
struct ice_vsi *vsi;
int err;
- vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
+ vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
+ if (vf->first_vector_idx < 0)
+ return -ENOMEM;
vsi = ice_vf_vsi_setup(vf);
if (!vsi)
@@ -789,14 +818,19 @@ static const struct ice_vf_ops ice_sriov_vf_ops = {
*/
static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
{
+ struct pci_dev *pdev = pf->pdev;
struct ice_vfs *vfs = &pf->vfs;
+ struct pci_dev *vfdev = NULL;
struct ice_vf *vf;
- u16 vf_id;
- int err;
+ u16 vf_pdev_id;
+ int err, pos;
lockdep_assert_held(&vfs->table_lock);
- for (vf_id = 0; vf_id < num_vfs; vf_id++) {
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_pdev_id);
+
+ for (u16 vf_id = 0; vf_id < num_vfs; vf_id++) {
vf = kzalloc(sizeof(*vf), GFP_KERNEL);
if (!vf) {
err = -ENOMEM;
@@ -812,11 +846,28 @@ static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
ice_initialize_vf_entry(vf);
+ do {
+ vfdev = pci_get_device(pdev->vendor, vf_pdev_id, vfdev);
+ } while (vfdev && vfdev->physfn != pdev);
+ vf->vfdev = vfdev;
vf->vf_sw_id = pf->first_sw;
+ pci_dev_get(vfdev);
+
+ /* set default number of MSI-X */
+ vf->num_msix = pf->vfs.num_msix_per;
+ vf->num_vf_qs = pf->vfs.num_qps_per;
+ ice_vc_set_default_allowlist(vf);
+
hash_add_rcu(vfs->table, &vf->entry, vf_id);
}
+ /* Decrement of refcount done by pci_get_device() inside the loop does
+ * not touch the last iteration's vfdev, so it has to be done manually
+ * to balance pci_dev_get() added within the loop.
+ */
+ pci_dev_put(vfdev);
+
return 0;
err_free_entries:
@@ -831,10 +882,16 @@ err_free_entries:
*/
static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
{
+ int total_vectors = pf->hw.func_caps.common_cap.num_msix_vectors;
struct device *dev = ice_pf_to_dev(pf);
struct ice_hw *hw = &pf->hw;
int ret;
+ pf->sriov_irq_bm = bitmap_zalloc(total_vectors, GFP_KERNEL);
+ if (!pf->sriov_irq_bm)
+ return -ENOMEM;
+ pf->sriov_irq_size = total_vectors;
+
/* Disable global interrupt 0 so we don't try to handle the VFLR. */
wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
@@ -893,6 +950,7 @@ err_unroll_intr:
/* rearm interrupts here */
ice_irq_dynamic_ena(hw, NULL, NULL);
clear_bit(ICE_OICR_INTR_DIS, pf->state);
+ bitmap_free(pf->sriov_irq_bm);
return ret;
}
@@ -957,6 +1015,175 @@ static int ice_check_sriov_allowed(struct ice_pf *pf)
}
/**
+ * ice_sriov_get_vf_total_msix - return number of MSI-X used by VFs
+ * @pdev: pointer to pci_dev struct
+ *
+ * The function is called via sysfs ops
+ */
+u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev)
+{
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+
+ return pf->sriov_irq_size - ice_get_max_used_msix_vector(pf);
+}
+
+static int ice_sriov_move_base_vector(struct ice_pf *pf, int move)
+{
+ if (pf->sriov_base_vector - move < ice_get_max_used_msix_vector(pf))
+ return -ENOMEM;
+
+ pf->sriov_base_vector -= move;
+ return 0;
+}
+
+static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id)
+{
+ u16 vf_ids[ICE_MAX_SRIOV_VFS];
+ struct ice_vf *tmp_vf;
+ int to_remap = 0, bkt;
+
+	/* For better irq usage, try to remap the irqs of VFs
+	 * that aren't running yet
+	 */
+ ice_for_each_vf(pf, bkt, tmp_vf) {
+ /* skip VF which is changing the number of MSI-X */
+ if (restricted_id == tmp_vf->vf_id ||
+ test_bit(ICE_VF_STATE_ACTIVE, tmp_vf->vf_states))
+ continue;
+
+ ice_dis_vf_mappings(tmp_vf);
+ ice_sriov_free_irqs(pf, tmp_vf);
+
+ vf_ids[to_remap] = tmp_vf->vf_id;
+ to_remap += 1;
+ }
+
+ for (int i = 0; i < to_remap; i++) {
+ tmp_vf = ice_get_vf_by_id(pf, vf_ids[i]);
+ if (!tmp_vf)
+ continue;
+
+ tmp_vf->first_vector_idx =
+ ice_sriov_get_irqs(pf, tmp_vf->num_msix);
+		/* there is no need to rebuild the VSI as we are only changing
+		 * the vector indexes, not the amount of MSI-X or queues
+		 */
+ ice_ena_vf_mappings(tmp_vf);
+ ice_put_vf(tmp_vf);
+ }
+}
+
+/**
+ * ice_sriov_set_msix_vec_count - set MSI-X vector count on a VF
+ * @vf_dev: pointer to pci_dev struct of VF device
+ * @msix_vec_count: new value for MSI-X amount on this VF
+ *
+ * Set requested MSI-X, queues and registers for @vf_dev.
+ *
+ * First do some sanity checks like if there are any VFs, if the new value
+ * is correct etc. Then disable old mapping (MSI-X and queues registers), change
+ * MSI-X and queues, rebuild VSI and enable new mapping.
+ *
+ * If possible (i.e. no driver is bound to the VF), also try to remap the
+ * other VFs in order to linearize the irq register usage.
+ */
+int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
+{
+ struct pci_dev *pdev = pci_physfn(vf_dev);
+ struct ice_pf *pf = pci_get_drvdata(pdev);
+ u16 prev_msix, prev_queues, queues;
+ bool needs_rebuild = false;
+ struct ice_vf *vf;
+ int id;
+
+ if (!ice_get_num_vfs(pf))
+ return -ENOENT;
+
+ if (!msix_vec_count)
+ return 0;
+
+ queues = msix_vec_count;
+ /* add 1 MSI-X for OICR */
+ msix_vec_count += 1;
+
+ if (queues > min(ice_get_avail_txq_count(pf),
+ ice_get_avail_rxq_count(pf)))
+ return -EINVAL;
+
+ if (msix_vec_count < ICE_MIN_INTR_PER_VF)
+ return -EINVAL;
+
+	/* Translate the PCI VF devfn to the VF ID */
+ for (id = 0; id < pci_num_vf(pdev); id++) {
+ if (vf_dev->devfn == pci_iov_virtfn_devfn(pdev, id))
+ break;
+ }
+
+ if (id == pci_num_vf(pdev))
+ return -ENOENT;
+
+ vf = ice_get_vf_by_id(pf, id);
+
+ if (!vf)
+ return -ENOENT;
+
+ prev_msix = vf->num_msix;
+ prev_queues = vf->num_vf_qs;
+
+ if (ice_sriov_move_base_vector(pf, msix_vec_count - prev_msix)) {
+ ice_put_vf(vf);
+ return -ENOSPC;
+ }
+
+ ice_dis_vf_mappings(vf);
+ ice_sriov_free_irqs(pf, vf);
+
+	/* Remap all VFs besides the one being configured now */
+ ice_sriov_remap_vectors(pf, vf->vf_id);
+
+ vf->num_msix = msix_vec_count;
+ vf->num_vf_qs = queues;
+ vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
+ if (vf->first_vector_idx < 0)
+ goto unroll;
+
+ ice_vf_vsi_release(vf);
+ if (vf->vf_ops->create_vsi(vf)) {
+ /* Try to rebuild with previous values */
+ needs_rebuild = true;
+ goto unroll;
+ }
+
+ dev_info(ice_pf_to_dev(pf),
+ "Changing VF %d resources to %d vectors and %d queues\n",
+ vf->vf_id, vf->num_msix, vf->num_vf_qs);
+
+ ice_ena_vf_mappings(vf);
+ ice_put_vf(vf);
+
+ return 0;
+
+unroll:
+ dev_info(ice_pf_to_dev(pf),
+ "Can't set %d vectors on VF %d, falling back to %d\n",
+ vf->num_msix, vf->vf_id, prev_msix);
+
+ vf->num_msix = prev_msix;
+ vf->num_vf_qs = prev_queues;
+ vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
+ if (vf->first_vector_idx < 0)
+ return -EINVAL;
+
+ if (needs_rebuild)
+ vf->vf_ops->create_vsi(vf);
+
+ ice_ena_vf_mappings(vf);
+ ice_put_vf(vf);
+
+ return -EINVAL;
+}
+
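Both MSI-X helpers above are meant to back the standard PCI sysfs knobs; a hedged sketch of the expected wiring (the actual hookup lives in ice_main.c's pci_driver, outside this hunk):

static struct pci_driver example_ice_driver = {
	.name = KBUILD_MODNAME,
	.sriov_configure = ice_sriov_configure,
	.sriov_get_vf_total_msix = ice_sriov_get_vf_total_msix,
	.sriov_set_msix_vec_count = ice_sriov_set_msix_vec_count,
};

With that in place, writing a vector count to /sys/bus/pci/devices/<VF BDF>/sriov_vf_msix_count lands in ice_sriov_set_msix_vec_count().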
+/**
* ice_sriov_configure - Enable or change number of VFs via sysfs
* @pdev: pointer to a pci_dev structure
* @num_vfs: number of VFs to allocate or 0 to free VFs
@@ -1709,31 +1936,16 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf)
/**
* ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
- * @pdev: pointer to a pci_dev structure
+ * @pf: pointer to the PF structure
*
* Called when recovering from a PF FLR to restore interrupt capability to
* the VFs.
*/
-void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
+void ice_restore_all_vfs_msi_state(struct ice_pf *pf)
{
- u16 vf_id;
- int pos;
-
- if (!pci_num_vf(pdev))
- return;
+ struct ice_vf *vf;
+ u32 bkt;
- pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
- if (pos) {
- struct pci_dev *vfdev;
-
- pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
- &vf_id);
- vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
- while (vfdev) {
- if (vfdev->is_virtfn && vfdev->physfn == pdev)
- pci_restore_msi_state(vfdev);
- vfdev = pci_get_device(pdev->vendor, vf_id,
- vfdev);
- }
- }
+ ice_for_each_vf(pf, bkt, vf)
+ pci_restore_msi_state(vf->vfdev);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h
index 346cb2666f3a..8488df38b586 100644
--- a/drivers/net/ethernet/intel/ice/ice_sriov.h
+++ b/drivers/net/ethernet/intel/ice/ice_sriov.h
@@ -33,7 +33,7 @@ int
ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi);
void ice_free_vfs(struct ice_pf *pf);
-void ice_restore_all_vfs_msi_state(struct pci_dev *pdev);
+void ice_restore_all_vfs_msi_state(struct ice_pf *pf);
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
@@ -60,6 +60,8 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf);
void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
bool
ice_vc_validate_pattern(struct ice_vf *vf, struct virtchnl_proto_hdrs *proto);
+u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev);
+int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count);
#else /* CONFIG_PCI_IOV */
static inline void ice_process_vflr_event(struct ice_pf *pf) { }
static inline void ice_free_vfs(struct ice_pf *pf) { }
@@ -67,7 +69,7 @@ static inline
void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { }
static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { }
static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { }
-static inline void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) { }
+static inline void ice_restore_all_vfs_msi_state(struct ice_pf *pf) { }
static inline int
ice_sriov_configure(struct pci_dev __always_unused *pdev,
@@ -142,5 +144,16 @@ ice_get_vf_stats(struct net_device __always_unused *netdev,
{
return -EOPNOTSUPP;
}
+
+static inline u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev)
+{
+ return 0;
+}
+
+static inline int
+ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
+{
+ return -EOPNOTSUPP;
+}
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_SRIOV_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 6db4ca7978cb..ee19f3aa3d19 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -20,12 +20,11 @@
* byte 0 = 0x2: to identify it as locally administered DA MAC
* byte 6 = 0x2: to identify it as locally administered SA MAC
* byte 12 = 0x81 & byte 13 = 0x00:
- * In case of VLAN filter first two bytes defines ether type (0x8100)
- * and remaining two bytes are placeholder for programming a given VLAN ID
- * In case of Ether type filter it is treated as header without VLAN tag
- * and byte 12 and 13 is used to program a given Ether type instead
+ * In case of VLAN filter the first two bytes define the ether type (0x8100)
+ * and the remaining two bytes are a placeholder for programming a given VLAN ID.
+ * In case of Ether type filter it is treated as a header without VLAN tag
+ * and bytes 12 and 13 are used to program a given Ether type instead.
*/
-#define DUMMY_ETH_HDR_LEN 16
static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
0x2, 0, 0, 0, 0, 0,
0x81, 0, 0, 0};
@@ -1369,14 +1368,6 @@ static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
ICE_PKT_PROFILE(tcp, 0),
};
-#define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l) struct_size((s), hdr_data, (l))
-#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s) \
- ICE_SW_RULE_RX_TX_HDR_SIZE((s), DUMMY_ETH_HDR_LEN)
-#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s) \
- ICE_SW_RULE_RX_TX_HDR_SIZE((s), 0)
-#define ICE_SW_RULE_LG_ACT_SIZE(s, n) struct_size((s), act, (n))
-#define ICE_SW_RULE_VSI_LIST_SIZE(s, n) struct_size((s), vsi, (n))
-
/* this is a recipe to profile association bitmap */
static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
ICE_MAX_NUM_PROFILES);
@@ -1821,15 +1812,11 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
enum ice_sw_lkup_type lkup_type,
enum ice_adminq_opc opc)
{
- struct ice_aqc_alloc_free_res_elem *sw_buf;
+ DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
+ u16 buf_len = __struct_size(sw_buf);
struct ice_aqc_res_elem *vsi_ele;
- u16 buf_len;
int status;
- buf_len = struct_size(sw_buf, elem, 1);
- sw_buf = devm_kzalloc(ice_hw_to_dev(hw), buf_len, GFP_KERNEL);
- if (!sw_buf)
- return -ENOMEM;
sw_buf->num_elems = cpu_to_le16(1);
if (lkup_type == ICE_SW_LKUP_MAC ||
@@ -1841,28 +1828,30 @@ ice_aq_alloc_free_vsi_list(struct ice_hw *hw, u16 *vsi_list_id,
lkup_type == ICE_SW_LKUP_DFLT) {
sw_buf->res_type = cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_REP);
} else if (lkup_type == ICE_SW_LKUP_VLAN) {
- sw_buf->res_type =
- cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
+ if (opc == ice_aqc_opc_alloc_res)
+ sw_buf->res_type =
+ cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE |
+ ICE_AQC_RES_TYPE_FLAG_SHARED);
+ else
+ sw_buf->res_type =
+ cpu_to_le16(ICE_AQC_RES_TYPE_VSI_LIST_PRUNE);
} else {
- status = -EINVAL;
- goto ice_aq_alloc_free_vsi_list_exit;
+ return -EINVAL;
}
if (opc == ice_aqc_opc_free_res)
sw_buf->elem[0].e.sw_resp = cpu_to_le16(*vsi_list_id);
- status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len, opc, NULL);
+ status = ice_aq_alloc_free_res(hw, sw_buf, buf_len, opc);
if (status)
- goto ice_aq_alloc_free_vsi_list_exit;
+ return status;
if (opc == ice_aqc_opc_alloc_res) {
vsi_ele = &sw_buf->elem[0];
*vsi_list_id = le16_to_cpu(vsi_ele->e.sw_resp);
}
-ice_aq_alloc_free_vsi_list_exit:
- devm_kfree(ice_hw_to_dev(hw), sw_buf);
- return status;
+ return 0;
}
/**
@@ -1910,7 +1899,7 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
*
* Add(0x0290)
*/
-static int
+int
ice_aq_add_recipe(struct ice_hw *hw,
struct ice_aqc_recipe_data_elem *s_recipe_list,
u16 num_recipes, struct ice_sq_cd *cd)
@@ -1947,7 +1936,7 @@ ice_aq_add_recipe(struct ice_hw *hw,
* The caller must supply enough space in s_recipe_list to hold all possible
* recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
*/
-static int
+int
ice_aq_get_recipe(struct ice_hw *hw,
struct ice_aqc_recipe_data_elem *s_recipe_list,
u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
@@ -2040,7 +2029,7 @@ error_out:
* @cd: pointer to command details structure or NULL
* Recipe to profile association (0x0291)
*/
-static int
+int
ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
struct ice_sq_cd *cd)
{
@@ -2066,7 +2055,7 @@ ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
* @cd: pointer to command details structure or NULL
* Associate profile ID with given recipe (0x0293)
*/
-static int
+int
ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
struct ice_sq_cd *cd)
{
@@ -2090,26 +2079,20 @@ ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
* @hw: pointer to the hardware structure
* @rid: recipe ID returned as response to AQ call
*/
-static int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
+int ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
{
- struct ice_aqc_alloc_free_res_elem *sw_buf;
- u16 buf_len;
+ DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, sw_buf, elem, 1);
+ u16 buf_len = __struct_size(sw_buf);
int status;
- buf_len = struct_size(sw_buf, elem, 1);
- sw_buf = kzalloc(buf_len, GFP_KERNEL);
- if (!sw_buf)
- return -ENOMEM;
-
sw_buf->num_elems = cpu_to_le16(1);
sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
ICE_AQC_RES_TYPE_S) |
ICE_AQC_RES_TYPE_FLAG_SHARED);
- status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
- ice_aqc_opc_alloc_res, NULL);
+ status = ice_aq_alloc_free_res(hw, sw_buf, buf_len,
+ ice_aqc_opc_alloc_res);
if (!status)
*rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
- kfree(sw_buf);
return status;
}
@@ -2272,6 +2255,10 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
/* Propagate some data to the recipe database */
recps[idx].is_root = !!is_root;
recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
+ recps[idx].need_pass_l2 = root_bufs.content.act_ctrl &
+ ICE_AQ_RECIPE_ACT_NEED_PASS_L2;
+ recps[idx].allow_pass_l2 = root_bufs.content.act_ctrl &
+ ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2;
bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
recps[idx].chain_idx = root_bufs.content.result_indx &
@@ -2460,6 +2447,15 @@ static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
}
/**
+ * ice_fill_eth_hdr - helper to copy dummy_eth_hdr into supplied buffer
+ * @eth_hdr: pointer to buffer to populate
+ */
+void ice_fill_eth_hdr(u8 *eth_hdr)
+{
+ memcpy(eth_hdr, dummy_eth_header, DUMMY_ETH_HDR_LEN);
+}
+
+/**
* ice_fill_sw_rule - Helper function to fill switch rule structure
* @hw: pointer to the hardware structure
* @f_info: entry containing packet forwarding information
@@ -3118,7 +3114,7 @@ ice_find_rule_entry(struct ice_hw *hw, u8 recp_id, struct ice_fltr_info *f_info)
* handle element. This can be extended further to search VSI list with more
* than 1 vsi_count. Returns pointer to VSI list entry if found.
*/
-static struct ice_vsi_list_map_info *
+struct ice_vsi_list_map_info *
ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
u16 *vsi_list_id)
{
@@ -3129,7 +3125,7 @@ ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
list_head = &sw->recp_list[recp_id].filt_rules;
list_for_each_entry(list_itr, list_head, list_entry) {
- if (list_itr->vsi_count == 1 && list_itr->vsi_list_info) {
+ if (list_itr->vsi_list_info) {
map_info = list_itr->vsi_list_info;
if (test_bit(vsi_handle, map_info->vsi_map)) {
*vsi_list_id = map_info->vsi_list_id;
@@ -3400,54 +3396,6 @@ exit:
}
/**
- * ice_mac_fltr_exist - does this MAC filter exist for given VSI
- * @hw: pointer to the hardware structure
- * @mac: MAC address to be checked (for MAC filter)
- * @vsi_handle: check MAC filter for this VSI
- */
-bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle)
-{
- struct ice_fltr_mgmt_list_entry *entry;
- struct list_head *rule_head;
- struct ice_switch_info *sw;
- struct mutex *rule_lock; /* Lock to protect filter rule list */
- u16 hw_vsi_id;
-
- if (!ice_is_vsi_valid(hw, vsi_handle))
- return false;
-
- hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
- sw = hw->switch_info;
- rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
- if (!rule_head)
- return false;
-
- rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
- mutex_lock(rule_lock);
- list_for_each_entry(entry, rule_head, list_entry) {
- struct ice_fltr_info *f_info = &entry->fltr_info;
- u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
-
- if (is_zero_ether_addr(mac_addr))
- continue;
-
- if (f_info->flag != ICE_FLTR_TX ||
- f_info->src_id != ICE_SRC_ID_VSI ||
- f_info->lkup_type != ICE_SW_LKUP_MAC ||
- f_info->fltr_act != ICE_FWD_TO_VSI ||
- hw_vsi_id != f_info->fwd_id.hw_vsi_id)
- continue;
-
- if (ether_addr_equal(mac, mac_addr)) {
- mutex_unlock(rule_lock);
- return true;
- }
- }
- mutex_unlock(rule_lock);
- return false;
-}
-
-/**
* ice_vlan_fltr_exist - does this VLAN filter exist for given VSI
* @hw: pointer to the hardware structure
* @vlan_id: VLAN ID
@@ -4473,29 +4421,19 @@ int
ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 *counter_id)
{
- struct ice_aqc_alloc_free_res_elem *buf;
- u16 buf_len;
+ DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
+ u16 buf_len = __struct_size(buf);
int status;
- /* Allocate resource */
- buf_len = struct_size(buf, elem, 1);
- buf = kzalloc(buf_len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
buf->num_elems = cpu_to_le16(num_items);
buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
ICE_AQC_RES_TYPE_M) | alloc_shared);
- status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
- ice_aqc_opc_alloc_res, NULL);
+ status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_alloc_res);
if (status)
- goto exit;
+ return status;
*counter_id = le16_to_cpu(buf->elem[0].e.sw_resp);
-
-exit:
- kfree(buf);
return status;
}
@@ -4511,27 +4449,19 @@ int
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 counter_id)
{
- struct ice_aqc_alloc_free_res_elem *buf;
- u16 buf_len;
+ DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
+ u16 buf_len = __struct_size(buf);
int status;
- /* Free resource */
- buf_len = struct_size(buf, elem, 1);
- buf = kzalloc(buf_len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
buf->num_elems = cpu_to_le16(num_items);
buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
ICE_AQC_RES_TYPE_M) | alloc_shared);
buf->elem[0].e.sw_resp = cpu_to_le16(counter_id);
- status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
- ice_aqc_opc_free_res, NULL);
+ status = ice_aq_alloc_free_res(hw, buf, buf_len, ice_aqc_opc_free_res);
if (status)
ice_debug(hw, ICE_DBG_SW, "counter resource could not be freed\n");
- kfree(buf);
return status;
}
@@ -4540,6 +4470,39 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
.offs = {__VA_ARGS__}, \
}
+/**
+ * ice_share_res - set a resource as shared or dedicated
+ * @hw: hw struct of original owner of resource
+ * @type: resource type
+ * @shared: is the resource being set to shared
+ * @res_id: resource id (descriptor)
+ */
+int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id)
+{
+ DEFINE_FLEX(struct ice_aqc_alloc_free_res_elem, buf, elem, 1);
+ u16 buf_len = __struct_size(buf);
+ int status;
+
+ buf->num_elems = cpu_to_le16(1);
+ if (shared)
+ buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
+ ICE_AQC_RES_TYPE_M) |
+ ICE_AQC_RES_TYPE_FLAG_SHARED);
+ else
+ buf->res_type = cpu_to_le16(((type << ICE_AQC_RES_TYPE_S) &
+ ICE_AQC_RES_TYPE_M) &
+ ~ICE_AQC_RES_TYPE_FLAG_SHARED);
+
+ buf->elem[0].e.sw_resp = cpu_to_le16(res_id);
+ status = ice_aq_alloc_free_res(hw, buf, buf_len,
+ ice_aqc_opc_share_res);
+ if (status)
+ ice_debug(hw, ICE_DBG_SW, "Could not set resource type %u id %u to %s\n",
+ type, res_id, shared ? "SHARED" : "DEDICATED");
+
+ return status;
+}
+
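A hypothetical call site for the helper above, marking a recipe as shared (for instance so a LAG peer PF can reference it):

	int err = ice_share_res(hw, ICE_AQC_RES_TYPE_RECIPE, true, rid);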
/* This is mapping table entry that maps every word within a given protocol
* structure to the real byte offset as per the specification of that
* protocol header.
@@ -4613,13 +4576,13 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
* ice_find_recp - find a recipe
* @hw: pointer to the hardware structure
* @lkup_exts: extension sequence to match
- * @tun_type: type of recipe tunnel
+ * @rinfo: information regarding the rule e.g. priority and action info
*
* Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
*/
static u16
ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
- enum ice_sw_tunnel_type tun_type)
+ const struct ice_adv_rule_info *rinfo)
{
bool refresh_required = true;
struct ice_sw_recipe *recp;
@@ -4680,9 +4643,12 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts,
}
/* If for "i"th recipe the found was never set to false
* then it means we found our match
- * Also tun type of recipe needs to be checked
+	 * Also tun type and *_pass_l2 of recipe need to be
+	 * checked
*/
- if (found && recp[i].tun_type == tun_type)
+ if (found && recp[i].tun_type == rinfo->tun_type &&
+ recp[i].need_pass_l2 == rinfo->need_pass_l2 &&
+ recp[i].allow_pass_l2 == rinfo->allow_pass_l2)
return i; /* Return the recipe ID */
}
}
@@ -4952,6 +4918,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
unsigned long *profiles)
{
DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
+ struct ice_aqc_recipe_content *content;
struct ice_aqc_recipe_data_elem *tmp;
struct ice_aqc_recipe_data_elem *buf;
struct ice_recp_grp_entry *entry;
@@ -5012,6 +4979,8 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
if (status)
goto err_unroll;
+ content = &buf[recps].content;
+
/* Clear the result index of the located recipe, as this will be
* updated, if needed, later in the recipe creation process.
*/
@@ -5022,26 +4991,24 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
/* if the recipe is a non-root recipe RID should be programmed
* as 0 for the rules to be applied correctly.
*/
- buf[recps].content.rid = 0;
- memset(&buf[recps].content.lkup_indx, 0,
- sizeof(buf[recps].content.lkup_indx));
+ content->rid = 0;
+ memset(&content->lkup_indx, 0,
+ sizeof(content->lkup_indx));
/* All recipes use look-up index 0 to match switch ID. */
- buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
- buf[recps].content.mask[0] =
- cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
+ content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
+ content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
/* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
* to be 0
*/
for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
- buf[recps].content.lkup_indx[i] = 0x80;
- buf[recps].content.mask[i] = 0;
+ content->lkup_indx[i] = 0x80;
+ content->mask[i] = 0;
}
for (i = 0; i < entry->r_group.n_val_pairs; i++) {
- buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
- buf[recps].content.mask[i + 1] =
- cpu_to_le16(entry->fv_mask[i]);
+ content->lkup_indx[i + 1] = entry->fv_idx[i];
+ content->mask[i + 1] = cpu_to_le16(entry->fv_mask[i]);
}
if (rm->n_grp_count > 1) {
@@ -5055,7 +5022,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
}
entry->chain_idx = chain_idx;
- buf[recps].content.result_indx =
+ content->result_indx =
ICE_AQ_RECIPE_RESULT_EN |
((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
ICE_AQ_RECIPE_RESULT_DATA_M);
@@ -5069,7 +5036,13 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
ICE_MAX_NUM_RECIPES);
set_bit(buf[recps].recipe_indx,
(unsigned long *)buf[recps].recipe_bitmap);
- buf[recps].content.act_ctrl_fwd_priority = rm->priority;
+ content->act_ctrl_fwd_priority = rm->priority;
+
+ if (rm->need_pass_l2)
+ content->act_ctrl |= ICE_AQ_RECIPE_ACT_NEED_PASS_L2;
+
+ if (rm->allow_pass_l2)
+ content->act_ctrl |= ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2;
recps++;
}
@@ -5107,9 +5080,11 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
if (status)
goto err_unroll;
+ content = &buf[recps].content;
+
buf[recps].recipe_indx = (u8)rid;
- buf[recps].content.rid = (u8)rid;
- buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
+ content->rid = (u8)rid;
+ content->rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
/* the new entry created should also be part of rg_list to
* make sure we have complete recipe
*/
@@ -5121,16 +5096,13 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
goto err_unroll;
}
last_chain_entry->rid = rid;
- memset(&buf[recps].content.lkup_indx, 0,
- sizeof(buf[recps].content.lkup_indx));
+ memset(&content->lkup_indx, 0, sizeof(content->lkup_indx));
/* All recipes use look-up index 0 to match switch ID. */
- buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
- buf[recps].content.mask[0] =
- cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
+ content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
+ content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
- buf[recps].content.lkup_indx[i] =
- ICE_AQ_RECIPE_LKUP_IGNORE;
- buf[recps].content.mask[i] = 0;
+ content->lkup_indx[i] = ICE_AQ_RECIPE_LKUP_IGNORE;
+ content->mask[i] = 0;
}
i = 1;
@@ -5142,8 +5114,8 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
list_for_each_entry(entry, &rm->rg_list, l_entry) {
last_chain_entry->fv_idx[i] = entry->chain_idx;
- buf[recps].content.lkup_indx[i] = entry->chain_idx;
- buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF);
+ content->lkup_indx[i] = entry->chain_idx;
+ content->mask[i++] = cpu_to_le16(0xFFFF);
set_bit(entry->rid, rm->r_bitmap);
}
list_add(&last_chain_entry->l_entry, &rm->rg_list);
@@ -5155,7 +5127,7 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
status = -EINVAL;
goto err_unroll;
}
- buf[recps].content.act_ctrl_fwd_priority = rm->priority;
+ content->act_ctrl_fwd_priority = rm->priority;
recps++;
rm->root_rid = (u8)rid;
@@ -5220,6 +5192,8 @@ ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
recp->n_grp_count = rm->n_grp_count;
recp->tun_type = rm->tun_type;
+ recp->need_pass_l2 = rm->need_pass_l2;
+ recp->allow_pass_l2 = rm->allow_pass_l2;
recp->recp_created = true;
}
rm->root_buf = buf;
@@ -5388,6 +5362,9 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
/* set the recipe priority if specified */
rm->priority = (u8)rinfo->priority;
+ rm->need_pass_l2 = rinfo->need_pass_l2;
+ rm->allow_pass_l2 = rinfo->allow_pass_l2;
+
/* Find offsets from the field vector. Pick the first one for all the
* recipes.
*/
@@ -5403,7 +5380,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
}
/* Look for a recipe which matches our requested fv / mask list */
- *rid = ice_find_recp(hw, lkup_exts, rinfo->tun_type);
+ *rid = ice_find_recp(hw, lkup_exts, rinfo);
if (*rid < ICE_MAX_NUM_RECIPES)
/* Success if found a recipe that match the existing criteria */
goto err_unroll;
@@ -5839,7 +5816,9 @@ static bool ice_rules_equal(const struct ice_adv_rule_info *first,
return first->sw_act.flag == second->sw_act.flag &&
first->tun_type == second->tun_type &&
first->vlan_type == second->vlan_type &&
- first->src_vsi == second->src_vsi;
+ first->src_vsi == second->src_vsi &&
+ first->need_pass_l2 == second->need_pass_l2 &&
+ first->allow_pass_l2 == second->allow_pass_l2;
}
/**
@@ -5994,14 +5973,21 @@ ice_adv_add_update_vsi_list(struct ice_hw *hw,
void ice_rule_add_tunnel_metadata(struct ice_adv_lkup_elem *lkup)
{
lkup->type = ICE_HW_METADATA;
- lkup->m_u.metadata.flags[ICE_PKT_FLAGS_TUNNEL] =
+ lkup->m_u.metadata.flags[ICE_PKT_FLAGS_MDID21] |=
cpu_to_be16(ICE_PKT_TUNNEL_MASK);
}
+void ice_rule_add_direction_metadata(struct ice_adv_lkup_elem *lkup)
+{
+ lkup->type = ICE_HW_METADATA;
+ lkup->m_u.metadata.flags[ICE_PKT_FLAGS_MDID20] |=
+ cpu_to_be16(ICE_PKT_FROM_NETWORK);
+}
+
void ice_rule_add_vlan_metadata(struct ice_adv_lkup_elem *lkup)
{
lkup->type = ICE_HW_METADATA;
- lkup->m_u.metadata.flags[ICE_PKT_FLAGS_VLAN] =
+ lkup->m_u.metadata.flags[ICE_PKT_FLAGS_MDID20] |=
cpu_to_be16(ICE_PKT_VLAN_MASK);
}
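Since these helpers now OR their masks into shared MDID flag words instead of assigning them, several of them can safely target the same lookup element. A small usage sketch (the element must start zeroed for the |= accumulation to be meaningful):

struct ice_adv_lkup_elem meta = {};

ice_rule_add_direction_metadata(&meta);	/* MDID20: direction flag */
ice_rule_add_vlan_metadata(&meta);	/* MDID20: VLAN mask ORed in */
ice_rule_add_tunnel_metadata(&meta);	/* MDID21: tunnel mask */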
@@ -6078,7 +6064,8 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
- rinfo->sw_act.fltr_act == ICE_DROP_PACKET)) {
+ rinfo->sw_act.fltr_act == ICE_DROP_PACKET ||
+ rinfo->sw_act.fltr_act == ICE_NOP)) {
status = -EIO;
goto free_pkt_profile;
}
@@ -6089,7 +6076,8 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
goto free_pkt_profile;
}
- if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
+ if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
+ rinfo->sw_act.fltr_act == ICE_NOP)
rinfo->sw_act.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, vsi_handle);
@@ -6159,6 +6147,11 @@ ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
ICE_SINGLE_ACT_VALID_BIT;
break;
+ case ICE_NOP:
+ act |= FIELD_PREP(ICE_SINGLE_ACT_VSI_ID_M,
+ rinfo->sw_act.fwd_id.hw_vsi_id);
+ act &= ~ICE_SINGLE_ACT_VALID_BIT;
+ break;
default:
status = -EIO;
goto err_ice_add_adv_rule;
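A note on the new ICE_NOP case above, as a C comment (an interpretation, not stated in the patch):

/* The VSI ID keeps the action field well formed, while the cleared
 * ICE_SINGLE_ACT_VALID_BIT tells hardware not to forward or drop,
 * so the rule matches without taking an action.
 */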
@@ -6439,7 +6432,7 @@ ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
return -EIO;
}
- rid = ice_find_recp(hw, &lkup_exts, rinfo->tun_type);
+ rid = ice_find_recp(hw, &lkup_exts, rinfo);
/* If did not find a recipe that match the existing criteria */
if (rid == ICE_MAX_NUM_RECIPES)
return -EINVAL;
@@ -6533,59 +6526,6 @@ ice_rem_adv_rule_by_id(struct ice_hw *hw,
}
/**
- * ice_rem_adv_rule_for_vsi - removes existing advanced switch rules for a
- * given VSI handle
- * @hw: pointer to the hardware structure
- * @vsi_handle: VSI handle for which we are supposed to remove all the rules.
- *
- * This function is used to remove all the rules for a given VSI and as soon
- * as removing a rule fails, it will return immediately with the error code,
- * else it will return success.
- */
-int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle)
-{
- struct ice_adv_fltr_mgmt_list_entry *list_itr, *tmp_entry;
- struct ice_vsi_list_map_info *map_info;
- struct ice_adv_rule_info rinfo;
- struct list_head *list_head;
- struct ice_switch_info *sw;
- int status;
- u8 rid;
-
- sw = hw->switch_info;
- for (rid = 0; rid < ICE_MAX_NUM_RECIPES; rid++) {
- if (!sw->recp_list[rid].recp_created)
- continue;
- if (!sw->recp_list[rid].adv_rule)
- continue;
-
- list_head = &sw->recp_list[rid].filt_rules;
- list_for_each_entry_safe(list_itr, tmp_entry, list_head,
- list_entry) {
- rinfo = list_itr->rule_info;
-
- if (rinfo.sw_act.fltr_act == ICE_FWD_TO_VSI_LIST) {
- map_info = list_itr->vsi_list_info;
- if (!map_info)
- continue;
-
- if (!test_bit(vsi_handle, map_info->vsi_map))
- continue;
- } else if (rinfo.sw_act.vsi_handle != vsi_handle) {
- continue;
- }
-
- rinfo.sw_act.vsi_handle = vsi_handle;
- status = ice_rem_adv_rule(hw, list_itr->lkups,
- list_itr->lkups_cnt, &rinfo);
- if (status)
- return status;
- }
- }
- return 0;
-}
-
-/**
* ice_replay_vsi_adv_rule - Replay advanced rule for requested VSI
* @hw: pointer to the hardware structure
* @vsi_handle: driver VSI handle
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index c84b56fe84a5..db7e501b7e0a 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -22,6 +22,16 @@
#define ICE_PROFID_IPV6_GTPU_TEID 46
#define ICE_PROFID_IPV6_GTPU_IPV6_TCP_INNER 70
+#define ICE_SW_RULE_VSI_LIST_SIZE(s, n) struct_size((s), vsi, (n))
+#define ICE_SW_RULE_RX_TX_HDR_SIZE(s, l) struct_size((s), hdr_data, (l))
+#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s) \
+ ICE_SW_RULE_RX_TX_HDR_SIZE((s), DUMMY_ETH_HDR_LEN)
+#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE(s) \
+ ICE_SW_RULE_RX_TX_HDR_SIZE((s), 0)
+#define ICE_SW_RULE_LG_ACT_SIZE(s, n) struct_size((s), act, (n))
+
+#define DUMMY_ETH_HDR_LEN 16
+
/* VSI context structure for add/get/update/free operations */
struct ice_vsi_ctx {
u16 vsi_num;
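A hedged usage sketch for the size macros above: they wrap struct_size() over the flexible-array members of the existing switch-rule types, e.g. (allocation shape only, error handling elided):

	struct ice_sw_rule_lkup_rx_tx *s_rule;

	/* fixed part plus a DUMMY_ETH_HDR_LEN-byte dummy Ethernet header */
	s_rule = kzalloc(ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule), GFP_KERNEL);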
@@ -191,6 +201,8 @@ struct ice_adv_rule_info {
u16 vlan_type;
u16 fltr_rule_id;
u32 priority;
+ u16 need_pass_l2:1;
+ u16 allow_pass_l2:1;
u16 src_vsi;
struct ice_sw_act_ctrl sw_act;
struct ice_adv_rule_flags_info flags_info;
@@ -254,6 +266,9 @@ struct ice_sw_recipe {
*/
u8 priority;
+ u8 need_pass_l2:1;
+ u8 allow_pass_l2:1;
+
struct list_head rg_list;
/* AQ buffer associated with this recipe */
@@ -340,9 +355,11 @@ ice_alloc_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
int
ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 counter_id);
+int ice_share_res(struct ice_hw *hw, u16 type, u8 shared, u16 res_id);
/* Switch/bridge related commands */
void ice_rule_add_tunnel_metadata(struct ice_adv_lkup_elem *lkup);
+void ice_rule_add_direction_metadata(struct ice_adv_lkup_elem *lkup);
void ice_rule_add_vlan_metadata(struct ice_adv_lkup_elem *lkup);
void ice_rule_add_src_vsi_metadata(struct ice_adv_lkup_elem *lkup);
int
@@ -354,7 +371,6 @@ int ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
int ice_remove_vlan(struct ice_hw *hw, struct list_head *v_list);
int ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
int ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst);
-bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle);
bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle);
int ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list);
int ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list);
@@ -379,7 +395,6 @@ int
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
bool rm_vlan_promisc);
-int ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle);
int
ice_rem_adv_rule_by_id(struct ice_hw *hw,
struct ice_rule_query_data *remove_entry);
@@ -389,6 +404,7 @@ u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle);
int ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle);
void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw);
+void ice_fill_eth_hdr(u8 *eth_hdr);
int
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
@@ -397,4 +413,21 @@ int
ice_update_recipe_lkup_idx(struct ice_hw *hw,
struct ice_update_recipe_lkup_idx_params *params);
void ice_change_proto_id_to_dvm(void);
+struct ice_vsi_list_map_info *
+ice_find_vsi_list_entry(struct ice_hw *hw, u8 recp_id, u16 vsi_handle,
+ u16 *vsi_list_id);
+int ice_alloc_recipe(struct ice_hw *hw, u16 *rid);
+int ice_aq_get_recipe(struct ice_hw *hw,
+ struct ice_aqc_recipe_data_elem *s_recipe_list,
+ u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd);
+int ice_aq_add_recipe(struct ice_hw *hw,
+ struct ice_aqc_recipe_data_elem *s_recipe_list,
+ u16 num_recipes, struct ice_sq_cd *cd);
+int
+ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ struct ice_sq_cd *cd);
+int
+ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ struct ice_sq_cd *cd);
+
#endif /* _ICE_SWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index 4a34ef5f58d3..dd03cb69ad26 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -7,6 +7,8 @@
#include "ice_lib.h"
#include "ice_protocol_type.h"
+#define ICE_TC_METADATA_LKUP_IDX 0
+
/**
* ice_tc_count_lkups - determine lookup count for switch filter
* @flags: TC-flower flags
@@ -19,7 +21,13 @@ static int
ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
struct ice_tc_flower_fltr *fltr)
{
- int lkups_cnt = 0;
+ int lkups_cnt = 1; /* 0th lookup is metadata */
+
+ /* Always add metadata as the 0th lookup. Included elements:
+ * - Direction flag (always present)
+ * - ICE_TC_FLWR_FIELD_VLAN_TPID (present if specified)
+ * - Tunnel flag (present if tunnel)
+ */
if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
lkups_cnt++;
@@ -54,10 +62,6 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO))
lkups_cnt++;
- /* is VLAN TPID specified */
- if (flags & ICE_TC_FLWR_FIELD_VLAN_TPID)
- lkups_cnt++;
-
/* is CVLAN specified? */
if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO))
lkups_cnt++;
@@ -84,10 +88,6 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
ICE_TC_FLWR_FIELD_SRC_L4_PORT))
lkups_cnt++;
- /* matching for tunneled packets in metadata */
- if (fltr->tunnel_type != TNL_LAST)
- lkups_cnt++;
-
return lkups_cnt;
}
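An illustrative count under the new scheme (assuming a tunneled filter that also matches tenant ID and a VLAN): the direction, tunnel, and TPID flags all fold into slot 0, so

	lkups_cnt = 1 (metadata, slot 0)
	          + 1 (ICE_TC_FLWR_FIELD_TENANT_ID)
	          + 1 (ICE_TC_FLWR_FIELD_VLAN)
	          = 3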
@@ -176,10 +176,9 @@ static u16 ice_check_supported_vlan_tpid(u16 vlan_tpid)
static int
ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
- struct ice_adv_lkup_elem *list)
+ struct ice_adv_lkup_elem *list, int i)
{
struct ice_tc_flower_lyr_2_4_hdrs *hdr = &fltr->outer_headers;
- int i = 0;
if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) {
u32 tenant_id;
@@ -329,8 +328,7 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
}
/* always fill matching on tunneled packets in metadata */
- ice_rule_add_tunnel_metadata(&list[i]);
- i++;
+ ice_rule_add_tunnel_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
return i;
}
@@ -358,13 +356,16 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
bool inner = false;
u16 vlan_tpid = 0;
- int i = 0;
+ int i = 1; /* 0th lookup is metadata */
rule_info->vlan_type = vlan_tpid;
+ /* Always add direction metadata */
+ ice_rule_add_direction_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
+
rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
if (tc_fltr->tunnel_type != TNL_LAST) {
- i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list);
+ i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list, i);
headers = &tc_fltr->inner_headers;
inner = true;
@@ -431,8 +432,7 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
rule_info->vlan_type =
ice_check_supported_vlan_tpid(vlan_tpid);
- ice_rule_add_vlan_metadata(&list[i]);
- i++;
+ ice_rule_add_vlan_metadata(&list[ICE_TC_METADATA_LKUP_IDX]);
}
if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO)) {
@@ -630,32 +630,83 @@ bool ice_is_tunnel_supported(struct net_device *dev)
return ice_tc_tun_get_type(dev) != TNL_LAST;
}
-static int
-ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr,
- struct flow_action_entry *act)
+static bool ice_tc_is_dev_uplink(struct net_device *dev)
+{
+ return netif_is_ice(dev) || ice_is_tunnel_supported(dev);
+}
+
+static int ice_tc_setup_redirect_action(struct net_device *filter_dev,
+ struct ice_tc_flower_fltr *fltr,
+ struct net_device *target_dev)
{
struct ice_repr *repr;
+ fltr->action.fltr_act = ICE_FWD_TO_VSI;
+
+ if (ice_is_port_repr_netdev(filter_dev) &&
+ ice_is_port_repr_netdev(target_dev)) {
+ repr = ice_netdev_to_repr(target_dev);
+
+ fltr->dest_vsi = repr->src_vsi;
+ fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+ } else if (ice_is_port_repr_netdev(filter_dev) &&
+ ice_tc_is_dev_uplink(target_dev)) {
+ repr = ice_netdev_to_repr(filter_dev);
+
+ fltr->dest_vsi = repr->src_vsi->back->switchdev.uplink_vsi;
+ fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+ } else if (ice_tc_is_dev_uplink(filter_dev) &&
+ ice_is_port_repr_netdev(target_dev)) {
+ repr = ice_netdev_to_repr(target_dev);
+
+ fltr->dest_vsi = repr->src_vsi;
+ fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
+ } else {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unsupported netdevice in switchdev mode");
+ return -EINVAL;
+ }
+
+ return 0;
+}
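For orientation, a hedged mapping of the three branches above to example tc invocations (device names are placeholders):

/* tc filter add dev $VF_REP ... action mirred egress redirect dev $VF_REP2
 *	-> egress, dest_vsi = target representor's VSI (VF to VF)
 * tc filter add dev $VF_REP ... action mirred egress redirect dev $UPLINK
 *	-> egress, dest_vsi = uplink VSI (VF to uplink)
 * tc filter add dev $UPLINK ... action mirred egress redirect dev $VF_REP
 *	-> ingress, dest_vsi = target representor's VSI (uplink to VF)
 */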
+
+static int
+ice_tc_setup_drop_action(struct net_device *filter_dev,
+ struct ice_tc_flower_fltr *fltr)
+{
+ fltr->action.fltr_act = ICE_DROP_PACKET;
+
+ if (ice_is_port_repr_netdev(filter_dev)) {
+ fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+ } else if (ice_tc_is_dev_uplink(filter_dev)) {
+ fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
+ } else {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unsupported netdevice in switchdev mode");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ice_eswitch_tc_parse_action(struct net_device *filter_dev,
+ struct ice_tc_flower_fltr *fltr,
+ struct flow_action_entry *act)
+{
+ int err;
+
switch (act->id) {
case FLOW_ACTION_DROP:
- fltr->action.fltr_act = ICE_DROP_PACKET;
+ err = ice_tc_setup_drop_action(filter_dev, fltr);
+ if (err)
+ return err;
+
break;
case FLOW_ACTION_REDIRECT:
- fltr->action.fltr_act = ICE_FWD_TO_VSI;
-
- if (ice_is_port_repr_netdev(act->dev)) {
- repr = ice_netdev_to_repr(act->dev);
-
- fltr->dest_vsi = repr->src_vsi;
- fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
- } else if (netif_is_ice(act->dev) ||
- ice_is_tunnel_supported(act->dev)) {
- fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
- } else {
- NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode");
- return -EINVAL;
- }
+ err = ice_tc_setup_redirect_action(filter_dev, fltr, act->dev);
+ if (err)
+ return err;
break;
@@ -696,10 +747,6 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
goto exit;
}
- /* egress traffic is always redirect to uplink */
- if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
- fltr->dest_vsi = vsi->back->switchdev.uplink_vsi;
-
rule_info.sw_act.fltr_act = fltr->action.fltr_act;
if (fltr->action.fltr_act != ICE_DROP_PACKET)
rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx;
@@ -713,13 +760,21 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
rule_info.flags_info.act_valid = true;
if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
+ /* Uplink to VF */
rule_info.sw_act.flag |= ICE_FLTR_RX;
rule_info.sw_act.src = hw->pf_id;
rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
- } else {
+ } else if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS &&
+ fltr->dest_vsi == vsi->back->switchdev.uplink_vsi) {
+ /* VF to Uplink */
rule_info.sw_act.flag |= ICE_FLTR_TX;
rule_info.sw_act.src = vsi->idx;
rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
+ } else {
+ /* VF to VF */
+ rule_info.sw_act.flag |= ICE_FLTR_TX;
+ rule_info.sw_act.src = vsi->idx;
+ rule_info.flags_info.act = ICE_SINGLE_ACT_LB_ENABLE;
}
/* specify the cookie as filter_rule_id */
@@ -1343,24 +1398,24 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
dissector = rule->match.dissector;
if (dissector->used_keys &
- ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
- BIT(FLOW_DISSECTOR_KEY_BASIC) |
- BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_VLAN) |
- BIT(FLOW_DISSECTOR_KEY_CVLAN) |
- BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
- BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
- BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
- BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
- BIT(FLOW_DISSECTOR_KEY_IP) |
- BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
- BIT(FLOW_DISSECTOR_KEY_PORTS) |
- BIT(FLOW_DISSECTOR_KEY_PPPOE) |
- BIT(FLOW_DISSECTOR_KEY_L2TPV3))) {
+ ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_OPTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_IP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IP) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_PPPOE) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_L2TPV3))) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
return -EOPNOTSUPP;
}
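A brief note on the BIT_ULL() conversion above, as a C comment (my reading of the motivation):

/* flow_dissector's used_keys field is 64 bits wide, so building the
 * supported-key mask with 32-bit BIT() would silently truncate any
 * FLOW_DISSECTOR_KEY_* value of 32 or above.
 */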
@@ -1382,10 +1437,10 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
*/
headers = &fltr->inner_headers;
} else if (dissector->used_keys &
- (BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
- BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
- BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
+ (BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_KEYID) |
+ BIT_ULL(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel");
return -EOPNOTSUPP;
} else {
@@ -1745,16 +1800,17 @@ ice_tc_parse_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
/**
* ice_parse_tc_flower_actions - Parse the actions for a TC filter
+ * @filter_dev: Pointer to device on which filter is being added
* @vsi: Pointer to VSI
* @cls_flower: Pointer to TC flower offload structure
* @fltr: Pointer to TC flower filter structure
*
* Parse the actions for a TC filter
*/
-static int
-ice_parse_tc_flower_actions(struct ice_vsi *vsi,
- struct flow_cls_offload *cls_flower,
- struct ice_tc_flower_fltr *fltr)
+static int ice_parse_tc_flower_actions(struct net_device *filter_dev,
+ struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower,
+ struct ice_tc_flower_fltr *fltr)
{
struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
struct flow_action *flow_action = &rule->action;
@@ -1769,7 +1825,7 @@ ice_parse_tc_flower_actions(struct ice_vsi *vsi,
flow_action_for_each(i, act, flow_action) {
if (ice_is_eswitch_mode_switchdev(vsi->back))
- err = ice_eswitch_tc_parse_action(fltr, act);
+ err = ice_eswitch_tc_parse_action(filter_dev, fltr, act);
else
err = ice_tc_parse_action(vsi, fltr, act);
if (err)
@@ -1856,7 +1912,7 @@ ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
if (err < 0)
goto err;
- err = ice_parse_tc_flower_actions(vsi, f, fltr);
+ err = ice_parse_tc_flower_actions(netdev, vsi, f, fltr);
if (err < 0)
goto err;
diff --git a/drivers/net/ethernet/intel/ice/ice_trace.h b/drivers/net/ethernet/intel/ice/ice_trace.h
index ae98d5a8ff60..b2f5c9fe0149 100644
--- a/drivers/net/ethernet/intel/ice/ice_trace.h
+++ b/drivers/net/ethernet/intel/ice/ice_trace.h
@@ -21,6 +21,7 @@
#define _ICE_TRACE_H_
#include <linux/tracepoint.h>
+#include "ice_eswitch_br.h"
/* ice_trace() macro enables shared code to refer to trace points
* like:
@@ -240,6 +241,95 @@ DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_fw_req);
DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_fw_done);
DEFINE_TX_TSTAMP_OP_EVENT(ice_tx_tstamp_complete);
+DECLARE_EVENT_CLASS(ice_esw_br_fdb_template,
+ TP_PROTO(struct ice_esw_br_fdb_entry *fdb),
+ TP_ARGS(fdb),
+ TP_STRUCT__entry(__array(char, dev_name, IFNAMSIZ)
+ __array(unsigned char, addr, ETH_ALEN)
+ __field(u16, vid)
+ __field(int, flags)),
+ TP_fast_assign(strscpy(__entry->dev_name,
+ netdev_name(fdb->dev),
+ IFNAMSIZ);
+ memcpy(__entry->addr, fdb->data.addr, ETH_ALEN);
+ __entry->vid = fdb->data.vid;
+ __entry->flags = fdb->flags;),
+ TP_printk("net_device=%s addr=%pM vid=%u flags=%x",
+ __entry->dev_name,
+ __entry->addr,
+ __entry->vid,
+ __entry->flags)
+);
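Given the TP_printk() format above, a rendered FDB event would look roughly like this (values invented):

	ice_eswitch_br_fdb_entry_create: net_device=eth2 addr=02:11:22:33:44:55 vid=10 flags=1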
+
+DEFINE_EVENT(ice_esw_br_fdb_template,
+ ice_eswitch_br_fdb_entry_create,
+ TP_PROTO(struct ice_esw_br_fdb_entry *fdb),
+ TP_ARGS(fdb)
+);
+
+DEFINE_EVENT(ice_esw_br_fdb_template,
+ ice_eswitch_br_fdb_entry_find_and_delete,
+ TP_PROTO(struct ice_esw_br_fdb_entry *fdb),
+ TP_ARGS(fdb)
+);
+
+DECLARE_EVENT_CLASS(ice_esw_br_vlan_template,
+ TP_PROTO(struct ice_esw_br_vlan *vlan),
+ TP_ARGS(vlan),
+ TP_STRUCT__entry(__field(u16, vid)
+ __field(u16, flags)),
+ TP_fast_assign(__entry->vid = vlan->vid;
+ __entry->flags = vlan->flags;),
+ TP_printk("vid=%u flags=%x",
+ __entry->vid,
+ __entry->flags)
+);
+
+DEFINE_EVENT(ice_esw_br_vlan_template,
+ ice_eswitch_br_vlan_create,
+ TP_PROTO(struct ice_esw_br_vlan *vlan),
+ TP_ARGS(vlan)
+);
+
+DEFINE_EVENT(ice_esw_br_vlan_template,
+ ice_eswitch_br_vlan_cleanup,
+ TP_PROTO(struct ice_esw_br_vlan *vlan),
+ TP_ARGS(vlan)
+);
+
+#define ICE_ESW_BR_PORT_NAME_L 16
+
+DECLARE_EVENT_CLASS(ice_esw_br_port_template,
+ TP_PROTO(struct ice_esw_br_port *port),
+ TP_ARGS(port),
+ TP_STRUCT__entry(__field(u16, vport_num)
+ __array(char, port_type, ICE_ESW_BR_PORT_NAME_L)),
+ TP_fast_assign(__entry->vport_num = port->vsi_idx;
+ if (port->type == ICE_ESWITCH_BR_UPLINK_PORT)
+ strscpy(__entry->port_type,
+ "Uplink",
+ ICE_ESW_BR_PORT_NAME_L);
+ else
+ strscpy(__entry->port_type,
+ "VF Representor",
+ ICE_ESW_BR_PORT_NAME_L);),
+ TP_printk("vport_num=%u port type=%s",
+ __entry->vport_num,
+ __entry->port_type)
+);
+
+DEFINE_EVENT(ice_esw_br_port_template,
+ ice_eswitch_br_port_link,
+ TP_PROTO(struct ice_esw_br_port *port),
+ TP_ARGS(port)
+);
+
+DEFINE_EVENT(ice_esw_br_port_template,
+ ice_eswitch_br_port_unlink,
+ TP_PROTO(struct ice_esw_br_port *port),
+ TP_ARGS(port)
+);
+
/* End tracepoints */
#endif /* _ICE_TRACE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 52d0a126eb61..9e97ea863068 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -2306,9 +2306,6 @@ ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
return;
- if (!tx_ring->ptp_tx)
- return;
-
/* Tx timestamps cannot be sampled when doing TSO */
if (first->tx_flags & ICE_TX_FLAGS_TSO)
return;
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 166413fc33f4..daf7b9dbb143 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -380,7 +380,6 @@ struct ice_tx_ring {
#define ICE_TX_FLAGS_RING_VLAN_L2TAG2 BIT(2)
u8 flags;
u8 dcb_tc; /* Traffic class of ring */
- u8 ptp_tx;
} ____cacheline_internodealigned_in_smp;
static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring)
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index c8322fb6f2b3..7e06373e14d9 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -450,7 +450,7 @@ void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res,
struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[first_idx];
if (xdp_res & ICE_XDP_REDIR)
- xdp_do_flush_map();
+ xdp_do_flush();
if (xdp_res & ICE_XDP_TX) {
if (static_branch_unlikely(&ice_xdp_locking_key))
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index a09556e57803..a18ca0ff879f 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2018, Intel Corporation. */
+/* Copyright (c) 2018-2023, Intel Corporation. */
#ifndef _ICE_TYPE_H_
#define _ICE_TYPE_H_
@@ -129,6 +129,7 @@ enum ice_set_fc_aq_failures {
enum ice_mac_type {
ICE_MAC_UNKNOWN = 0,
ICE_MAC_E810,
+ ICE_MAC_E830,
ICE_MAC_GENERIC,
};
@@ -277,6 +278,8 @@ struct ice_hw_common_caps {
u8 dcb;
u8 ieee_1588;
u8 rdma;
+ u8 roce_lag;
+ u8 sriov_lag;
bool nvm_update_pending_nvm;
bool nvm_update_pending_orom;
@@ -820,6 +823,13 @@ struct ice_mbx_data {
u16 async_watermark_val;
};
+/* PHY model */
+enum ice_phy_model {
+ ICE_PHY_UNSUP = -1,
+ ICE_PHY_E810 = 1,
+ ICE_PHY_E822,
+};
+
/* Port hardware description */
struct ice_hw {
u8 __iomem *hw_addr;
@@ -841,6 +851,7 @@ struct ice_hw {
u8 revision_id;
u8 pf_id; /* device profile info */
+ enum ice_phy_model phy_model;
u16 max_burst_size; /* driver sets this value */
@@ -899,17 +910,20 @@ struct ice_hw {
/* INTRL granularity in 1 us */
u8 intrl_gran;
-#define ICE_PHY_PER_NAC 1
-#define ICE_MAX_QUAD 2
-#define ICE_NUM_QUAD_TYPE 2
-#define ICE_PORTS_PER_QUAD 4
-#define ICE_PHY_0_LAST_QUAD 1
-#define ICE_PORTS_PER_PHY 8
-#define ICE_NUM_EXTERNAL_PORTS ICE_PORTS_PER_PHY
+#define ICE_PHY_PER_NAC_E822 1
+#define ICE_MAX_QUAD 2
+#define ICE_QUADS_PER_PHY_E822 2
+#define ICE_PORTS_PER_PHY_E822 8
+#define ICE_PORTS_PER_QUAD 4
+#define ICE_PORTS_PER_PHY_E810 4
+#define ICE_NUM_EXTERNAL_PORTS (ICE_MAX_QUAD * ICE_PORTS_PER_QUAD)
/* Active package version (currently active) */
struct ice_pkg_ver active_pkg_ver;
+ u32 pkg_seg_id;
+ u32 pkg_sign_type;
u32 active_track_id;
+ u8 pkg_has_signing_seg:1;
u8 active_pkg_name[ICE_PKG_NAME_SIZE];
u8 active_pkg_in_nvm;
@@ -963,6 +977,7 @@ struct ice_hw {
DECLARE_BITMAP(hw_ptype, ICE_FLOW_PTYPE_MAX);
u8 dvm_ena;
u16 io_expander_handle;
+ u8 cgu_part_number;
};
/* Statistics collected by each port, VSI, VEB, and S-channel */
@@ -1033,14 +1048,15 @@ enum ice_sw_fwd_act_type {
ICE_FWD_TO_Q,
ICE_FWD_TO_QGRP,
ICE_DROP_PACKET,
+ ICE_NOP,
ICE_INVAL_ACT
};
struct ice_aq_get_set_rss_lut_params {
- u16 vsi_handle; /* software VSI handle */
- u16 lut_size; /* size of the LUT buffer */
- u8 lut_type; /* type of the LUT (i.e. VSI, PF, Global) */
u8 *lut; /* input RSS LUT for set and output RSS LUT for get */
+ enum ice_lut_size lut_size; /* size of the LUT buffer */
+ enum ice_lut_type lut_type; /* type of the LUT (i.e. VSI, PF, Global) */
+ u16 vsi_handle; /* software VSI handle */
u8 global_lut_id; /* only valid when lut_type is global */
};
@@ -1142,9 +1158,6 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_SR_WORDS_IN_1KB 512
-/* Hash redirection LUT for VSI - maximum array size */
-#define ICE_VSIQF_HLUT_ARRAY_SIZE ((VSIQF_HLUT_MAX_INDEX + 1) * 4)
-
/* AQ API version for LLDP_FILTER_CONTROL */
#define ICE_FW_API_LLDP_FLTR_MAJ 1
#define ICE_FW_API_LLDP_FLTR_MIN 7
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index ea3310be8354..b7ae09952156 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -56,6 +56,8 @@ static void ice_release_vf(struct kref *ref)
{
struct ice_vf *vf = container_of(ref, struct ice_vf, refcnt);
+ pci_dev_put(vf->vfdev);
+
vf->vf_ops->free(vf);
}
@@ -304,6 +306,237 @@ static int ice_vf_rebuild_vsi(struct ice_vf *vf)
}
/**
+ * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
+ * @vf: VF to add MAC filters for
+ * @vsi: Pointer to VSI
+ *
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * always re-adds either a VLAN 0 or port VLAN based filter after reset.
+ */
+static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ int err;
+
+ if (ice_vf_is_port_vlan_ena(vf)) {
+ err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
+ if (err) {
+ dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
+ vf->vf_id, err);
+ return err;
+ }
+
+ err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
+ } else {
+ err = ice_vsi_add_vlan_zero(vsi);
+ }
+
+ if (err) {
+ dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
+ ice_vf_is_port_vlan_ena(vf) ?
+ ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
+ return err;
+ }
+
+ err = vlan_ops->ena_rx_filtering(vsi);
+ if (err)
+ dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
+ vf->vf_id, vsi->idx, err);
+
+ return 0;
+}
+
+/**
+ * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
+ * @vf: VF to re-apply the configuration for
+ *
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * needs to re-apply the host configured Tx rate limiting configuration.
+ */
+static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ int err;
+
+ if (WARN_ON(!vsi))
+ return -EINVAL;
+
+ if (vf->min_tx_rate) {
+ err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
+ if (err) {
+ dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
+ vf->min_tx_rate, vf->vf_id, err);
+ return err;
+ }
+ }
+
+ if (vf->max_tx_rate) {
+ err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
+ if (err) {
+ dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
+ vf->max_tx_rate, vf->vf_id, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
+ * @vf: VF to configure trust setting for
+ */
+static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
+{
+ assign_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps, vf->trusted);
+}
+
+/**
+ * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
+ * @vf: VF to add MAC filters for
+ *
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
+ */
+static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ u8 broadcast[ETH_ALEN];
+ int status;
+
+ if (WARN_ON(!vsi))
+ return -EINVAL;
+
+ if (ice_is_eswitch_mode_switchdev(vf->pf))
+ return 0;
+
+ eth_broadcast_addr(broadcast);
+ status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
+ if (status) {
+ dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
+ vf->vf_id, status);
+ return status;
+ }
+
+ vf->num_mac++;
+
+ if (is_valid_ether_addr(vf->hw_lan_addr)) {
+ status = ice_fltr_add_mac(vsi, vf->hw_lan_addr,
+ ICE_FWD_TO_VSI);
+ if (status) {
+ dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
+ &vf->hw_lan_addr[0], vf->vf_id,
+ status);
+ return status;
+ }
+ vf->num_mac++;
+
+ ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
+ }
+
+ return 0;
+}
+
+/**
+ * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
+ * @vsi: Pointer to VSI
+ *
+ * This function moves VSI into corresponding scheduler aggregator node
+ * based on cached value of "aggregator node info" per VSI
+ */
+static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+ int status;
+
+ if (!vsi->agg_node)
+ return;
+
+ dev = ice_pf_to_dev(pf);
+ if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
+ dev_dbg(dev,
+ "agg_id %u already has reached max_num_vsis %u\n",
+ vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
+ return;
+ }
+
+ status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
+ vsi->idx, vsi->tc_cfg.ena_tc);
+ if (status)
+ dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
+ vsi->idx, vsi->agg_node->agg_id);
+ else
+ vsi->agg_node->num_vsis++;
+}
+
+/**
+ * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
+ * @vf: VF to rebuild host configuration on
+ */
+static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+
+ if (WARN_ON(!vsi))
+ return;
+
+ ice_vf_set_host_trust_cfg(vf);
+
+ if (ice_vf_rebuild_host_mac_cfg(vf))
+ dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
+ vf->vf_id);
+
+ if (ice_vf_rebuild_host_vlan_cfg(vf, vsi))
+ dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
+ vf->vf_id);
+
+ if (ice_vf_rebuild_host_tx_rate_cfg(vf))
+ dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
+ vf->vf_id);
+
+ if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk))
+ dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n",
+ vf->vf_id);
+
+ /* rebuild aggregator node config for main VF VSI */
+ ice_vf_rebuild_aggregator_node_cfg(vsi);
+}
+
+/**
+ * ice_set_vf_state_qs_dis - Set VF queues state to disabled
+ * @vf: pointer to the VF structure
+ */
+static void ice_set_vf_state_qs_dis(struct ice_vf *vf)
+{
+ /* Clear Rx/Tx enabled queues flag */
+ bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
+ bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
+ clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
+}
+
+/**
+ * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
+ * @vf: VF to set in initialized state
+ *
+ * After this function the VF will be ready to receive/handle the
+ * VIRTCHNL_OP_GET_VF_RESOURCES message
+ */
+static void ice_vf_set_initialized(struct ice_vf *vf)
+{
+ ice_set_vf_state_qs_dis(vf);
+ clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
+ clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
+ clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
+ set_bit(ICE_VF_STATE_INIT, vf->vf_states);
+ memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
+}
+
+/**
* ice_vf_post_vsi_rebuild - Reset tasks that occur after VSI rebuild
* @vf: the VF being reset
*
@@ -596,12 +829,16 @@ static void ice_notify_vf_reset(struct ice_vf *vf)
int ice_reset_vf(struct ice_vf *vf, u32 flags)
{
struct ice_pf *pf = vf->pf;
+ struct ice_lag *lag;
struct ice_vsi *vsi;
+ u8 act_prt, pri_prt;
struct device *dev;
int err = 0;
bool rsd;
dev = ice_pf_to_dev(pf);
+ act_prt = ICE_LAG_INVALID_PORT;
+ pri_prt = pf->hw.port_info->lport;
if (flags & ICE_VF_RESET_NOTIFY)
ice_notify_vf_reset(vf);
@@ -612,6 +849,17 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
return 0;
}
+ lag = pf->lag;
+ mutex_lock(&pf->lag_mutex);
+ if (lag && lag->bonded && lag->primary) {
+ act_prt = lag->active_port;
+ if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
+ lag->upper_netdev)
+ ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
+ else
+ act_prt = ICE_LAG_INVALID_PORT;
+ }
+
if (flags & ICE_VF_RESET_LOCK)
mutex_lock(&vf->cfg_lock);
else
@@ -704,19 +952,12 @@ out_unlock:
if (flags & ICE_VF_RESET_LOCK)
mutex_unlock(&vf->cfg_lock);
- return err;
-}
+ if (lag && lag->bonded && lag->primary &&
+ act_prt != ICE_LAG_INVALID_PORT)
+ ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+ mutex_unlock(&pf->lag_mutex);
-/**
- * ice_set_vf_state_qs_dis - Set VF queues state to disabled
- * @vf: pointer to the VF structure
- */
-static void ice_set_vf_state_qs_dis(struct ice_vf *vf)
-{
- /* Clear Rx/Tx enabled queues flag */
- bitmap_zero(vf->txq_ena, ICE_MAX_RSS_QS_PER_VF);
- bitmap_zero(vf->rxq_ena, ICE_MAX_RSS_QS_PER_VF);
- clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
+ return err;
}
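The reset path above now brackets its work with a LAG port swap; the same pattern reappears in ice_vc_cfg_qs_msg() later in this patch. A condensed sketch, where act_prt_valid stands in for the checks in the real code:

	mutex_lock(&pf->lag_mutex);
	if (lag && lag->bonded && lag->primary && act_prt_valid)
		ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt); /* park on primary */

	/* ... VF reset / queue-configuration work ... */

	if (lag && lag->bonded && lag->primary && act_prt_valid)
		ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt); /* restore active port */
	mutex_unlock(&pf->lag_mutex);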
/**
@@ -960,211 +1201,6 @@ bool ice_is_vf_link_up(struct ice_vf *vf)
}
/**
- * ice_vf_set_host_trust_cfg - set trust setting based on pre-reset value
- * @vf: VF to configure trust setting for
- */
-static void ice_vf_set_host_trust_cfg(struct ice_vf *vf)
-{
- if (vf->trusted)
- set_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
- else
- clear_bit(ICE_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
-}
-
-/**
- * ice_vf_rebuild_host_mac_cfg - add broadcast and the VF's perm_addr/LAA
- * @vf: VF to add MAC filters for
- *
- * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
- * always re-adds a broadcast filter and the VF's perm_addr/LAA after reset.
- */
-static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
-{
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
- u8 broadcast[ETH_ALEN];
- int status;
-
- if (WARN_ON(!vsi))
- return -EINVAL;
-
- if (ice_is_eswitch_mode_switchdev(vf->pf))
- return 0;
-
- eth_broadcast_addr(broadcast);
- status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
- if (status) {
- dev_err(dev, "failed to add broadcast MAC filter for VF %u, error %d\n",
- vf->vf_id, status);
- return status;
- }
-
- vf->num_mac++;
-
- if (is_valid_ether_addr(vf->hw_lan_addr)) {
- status = ice_fltr_add_mac(vsi, vf->hw_lan_addr,
- ICE_FWD_TO_VSI);
- if (status) {
- dev_err(dev, "failed to add default unicast MAC filter %pM for VF %u, error %d\n",
- &vf->hw_lan_addr[0], vf->vf_id,
- status);
- return status;
- }
- vf->num_mac++;
-
- ether_addr_copy(vf->dev_lan_addr, vf->hw_lan_addr);
- }
-
- return 0;
-}
-
-/**
- * ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
- * @vf: VF to add MAC filters for
- * @vsi: Pointer to VSI
- *
- * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
- * always re-adds either a VLAN 0 or port VLAN based filter after reset.
- */
-static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
-{
- struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);
- struct device *dev = ice_pf_to_dev(vf->pf);
- int err;
-
- if (ice_vf_is_port_vlan_ena(vf)) {
- err = vlan_ops->set_port_vlan(vsi, &vf->port_vlan_info);
- if (err) {
- dev_err(dev, "failed to configure port VLAN via VSI parameters for VF %u, error %d\n",
- vf->vf_id, err);
- return err;
- }
-
- err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
- } else {
- err = ice_vsi_add_vlan_zero(vsi);
- }
-
- if (err) {
- dev_err(dev, "failed to add VLAN %u filter for VF %u during VF rebuild, error %d\n",
- ice_vf_is_port_vlan_ena(vf) ?
- ice_vf_get_port_vlan_id(vf) : 0, vf->vf_id, err);
- return err;
- }
-
- err = vlan_ops->ena_rx_filtering(vsi);
- if (err)
- dev_warn(dev, "failed to enable Rx VLAN filtering for VF %d VSI %d during VF rebuild, error %d\n",
- vf->vf_id, vsi->idx, err);
-
- return 0;
-}
-
-/**
- * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
- * @vf: VF to re-apply the configuration for
- *
- * Called after a VF VSI has been re-added/rebuild during reset. The PF driver
- * needs to re-apply the host configured Tx rate limiting configuration.
- */
-static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
-{
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
- int err;
-
- if (WARN_ON(!vsi))
- return -EINVAL;
-
- if (vf->min_tx_rate) {
- err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
- if (err) {
- dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
- vf->min_tx_rate, vf->vf_id, err);
- return err;
- }
- }
-
- if (vf->max_tx_rate) {
- err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
- if (err) {
- dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
- vf->max_tx_rate, vf->vf_id, err);
- return err;
- }
- }
-
- return 0;
-}
-
-/**
- * ice_vf_rebuild_aggregator_node_cfg - rebuild aggregator node config
- * @vsi: Pointer to VSI
- *
- * This function moves VSI into corresponding scheduler aggregator node
- * based on cached value of "aggregator node info" per VSI
- */
-static void ice_vf_rebuild_aggregator_node_cfg(struct ice_vsi *vsi)
-{
- struct ice_pf *pf = vsi->back;
- struct device *dev;
- int status;
-
- if (!vsi->agg_node)
- return;
-
- dev = ice_pf_to_dev(pf);
- if (vsi->agg_node->num_vsis == ICE_MAX_VSIS_IN_AGG_NODE) {
- dev_dbg(dev,
- "agg_id %u already has reached max_num_vsis %u\n",
- vsi->agg_node->agg_id, vsi->agg_node->num_vsis);
- return;
- }
-
- status = ice_move_vsi_to_agg(pf->hw.port_info, vsi->agg_node->agg_id,
- vsi->idx, vsi->tc_cfg.ena_tc);
- if (status)
- dev_dbg(dev, "unable to move VSI idx %u into aggregator %u node",
- vsi->idx, vsi->agg_node->agg_id);
- else
- vsi->agg_node->num_vsis++;
-}
-
-/**
- * ice_vf_rebuild_host_cfg - host admin configuration is persistent across reset
- * @vf: VF to rebuild host configuration on
- */
-void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
-{
- struct device *dev = ice_pf_to_dev(vf->pf);
- struct ice_vsi *vsi = ice_get_vf_vsi(vf);
-
- if (WARN_ON(!vsi))
- return;
-
- ice_vf_set_host_trust_cfg(vf);
-
- if (ice_vf_rebuild_host_mac_cfg(vf))
- dev_err(dev, "failed to rebuild default MAC configuration for VF %d\n",
- vf->vf_id);
-
- if (ice_vf_rebuild_host_vlan_cfg(vf, vsi))
- dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
- vf->vf_id);
-
- if (ice_vf_rebuild_host_tx_rate_cfg(vf))
- dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
- vf->vf_id);
-
- if (ice_vsi_apply_spoofchk(vsi, vf->spoofchk))
- dev_err(dev, "failed to rebuild spoofchk configuration for VF %d\n",
- vf->vf_id);
-
- /* rebuild aggregator node config for main VF VSI */
- ice_vf_rebuild_aggregator_node_cfg(vsi);
-}
-
-/**
* ice_vf_ctrl_invalidate_vsi - invalidate ctrl_vsi_idx to remove VSI access
* @vf: VF that control VSI is being invalidated on
*/
@@ -1293,23 +1329,6 @@ void ice_vf_vsi_release(struct ice_vf *vf)
}
/**
- * ice_vf_set_initialized - VF is ready for VIRTCHNL communication
- * @vf: VF to set in initialized state
- *
- * After this function the VF will be ready to receive/handle the
- * VIRTCHNL_OP_GET_VF_RESOURCES message
- */
-void ice_vf_set_initialized(struct ice_vf *vf)
-{
- ice_set_vf_state_qs_dis(vf);
- clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
- clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
- clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
- set_bit(ICE_VF_STATE_INIT, vf->vf_states);
- memset(&vf->vlan_v2_caps, 0, sizeof(vf->vlan_v2_caps));
-}
-
-/**
* ice_get_vf_ctrl_vsi - Get first VF control VSI pointer
* @pf: the PF private structure
* @vsi: pointer to the VSI
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.h b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
index 48fea6fa0362..93c774f2f437 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.h
@@ -72,7 +72,7 @@ struct ice_vfs {
struct mutex table_lock; /* Lock for protecting the hash table */
u16 num_supported; /* max supported VFs on this PF */
u16 num_qps_per; /* number of queue pairs per VF */
- u16 num_msix_per; /* number of MSI-X vectors per VF */
+ u16 num_msix_per; /* default MSI-X vectors per VF */
unsigned long last_printed_mdd_jiffies; /* MDD message rate limit */
};
@@ -82,7 +82,7 @@ struct ice_vf {
struct rcu_head rcu;
struct kref refcnt;
struct ice_pf *pf;
-
+ struct pci_dev *vfdev;
/* Used during virtchnl message handling and NDO ops against the VF
* that will trigger a VFR
*/
@@ -123,6 +123,9 @@ struct ice_vf {
u8 num_req_qs; /* num of queue pairs requested by VF */
u16 num_mac;
u16 num_vf_qs; /* num of queue configured per VF */
+ u8 vlan_strip_ena; /* Outer and Inner VLAN strip enable */
+#define ICE_INNER_VLAN_STRIP_ENA BIT(0)
+#define ICE_OUTER_VLAN_STRIP_ENA BIT(1)
struct ice_mdd_vf_events mdd_rx_events;
struct ice_mdd_vf_events mdd_tx_events;
DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX);
@@ -133,6 +136,8 @@ struct ice_vf {
/* devlink port data */
struct devlink_port devlink_port;
+
+ u16 num_msix; /* num of MSI-X configured on this VF */
};
/* Flags for controlling behavior of ice_reset_vf */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
index 6f3293b793b5..0c7e77c0a09f 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib_private.h
@@ -32,13 +32,11 @@ int ice_vsi_apply_spoofchk(struct ice_vsi *vsi, bool enable);
bool ice_is_vf_trusted(struct ice_vf *vf);
bool ice_vf_has_no_qs_ena(struct ice_vf *vf);
bool ice_is_vf_link_up(struct ice_vf *vf);
-void ice_vf_rebuild_host_cfg(struct ice_vf *vf);
void ice_vf_ctrl_invalidate_vsi(struct ice_vf *vf);
void ice_vf_ctrl_vsi_release(struct ice_vf *vf);
struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf);
int ice_vf_init_host_cfg(struct ice_vf *vf, struct ice_vsi *vsi);
void ice_vf_invalidate_vsi(struct ice_vf *vf);
void ice_vf_vsi_release(struct ice_vf *vf);
-void ice_vf_set_initialized(struct ice_vf *vf);
#endif /* _ICE_VF_LIB_PRIVATE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
index b1ffb81893d4..80dc4bcdd3a4 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.c
@@ -21,6 +21,105 @@ noop_vlan(struct ice_vsi __always_unused *vsi)
return 0;
}
+static void ice_port_vlan_on(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+ struct ice_pf *pf = vsi->back;
+
+ if (ice_is_dvm_ena(&pf->hw)) {
+ vlan_ops = &vsi->outer_vlan_ops;
+
+ /* setup outer VLAN ops */
+ vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
+ vlan_ops->clear_port_vlan = ice_vsi_clear_outer_port_vlan;
+
+ /* setup inner VLAN ops */
+ vlan_ops = &vsi->inner_vlan_ops;
+ vlan_ops->add_vlan = noop_vlan_arg;
+ vlan_ops->del_vlan = noop_vlan_arg;
+ vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
+ } else {
+ vlan_ops = &vsi->inner_vlan_ops;
+
+ vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
+ vlan_ops->clear_port_vlan = ice_vsi_clear_inner_port_vlan;
+ }
+
+ /* all Rx traffic should be in the domain of the assigned port VLAN,
+ * so prevent disabling Rx VLAN filtering
+ */
+ vlan_ops->dis_rx_filtering = noop_vlan;
+
+ vlan_ops->ena_rx_filtering = ice_vsi_ena_rx_vlan_filtering;
+}
+
+static void ice_port_vlan_off(struct ice_vsi *vsi)
+{
+ struct ice_vsi_vlan_ops *vlan_ops;
+ struct ice_pf *pf = vsi->back;
+
+ /* setup inner VLAN ops */
+ vlan_ops = &vsi->inner_vlan_ops;
+
+ vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
+
+ if (ice_is_dvm_ena(&pf->hw)) {
+ vlan_ops = &vsi->outer_vlan_ops;
+
+ vlan_ops->del_vlan = ice_vsi_del_vlan;
+ vlan_ops->ena_stripping = ice_vsi_ena_outer_stripping;
+ vlan_ops->dis_stripping = ice_vsi_dis_outer_stripping;
+ vlan_ops->ena_insertion = ice_vsi_ena_outer_insertion;
+ vlan_ops->dis_insertion = ice_vsi_dis_outer_insertion;
+ } else {
+ vlan_ops->del_vlan = ice_vsi_del_vlan;
+ }
+
+ vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
+
+ if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
+ vlan_ops->ena_rx_filtering = noop_vlan;
+ else
+ vlan_ops->ena_rx_filtering =
+ ice_vsi_ena_rx_vlan_filtering;
+}
+
+/**
+ * ice_vf_vsi_enable_port_vlan - Set VSI VLAN ops to support port VLAN
+ * @vsi: VF's VSI being configured
+ *
+ * The function does not create the port VLAN itself; it only makes it
+ * possible to create one via the VLAN ops on the VF VSI.
+ */
+void ice_vf_vsi_enable_port_vlan(struct ice_vsi *vsi)
+{
+ if (WARN_ON_ONCE(!vsi->vf))
+ return;
+
+ ice_port_vlan_on(vsi);
+}
+
+/**
+ * ice_vf_vsi_disable_port_vlan - Clear VSI support for creating port VLAN
+ * @vsi: VF's VSI being configured
+ *
+ * The function should be called after the port VLAN has been removed
+ * from the VSI (using the VLAN ops).
+ */
+void ice_vf_vsi_disable_port_vlan(struct ice_vsi *vsi)
+{
+ if (WARN_ON_ONCE(!vsi->vf))
+ return;
+
+ ice_port_vlan_off(vsi);
+}
+
/**
* ice_vf_vsi_init_vlan_ops - Initialize default VSI VLAN ops for VF VSI
* @vsi: VF's VSI being configured
@@ -39,91 +138,17 @@ void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi)
if (WARN_ON(!vf))
return;
- if (ice_is_dvm_ena(&pf->hw)) {
- vlan_ops = &vsi->outer_vlan_ops;
+ if (ice_vf_is_port_vlan_ena(vf))
+ ice_port_vlan_on(vsi);
+ else
+ ice_port_vlan_off(vsi);
- /* outer VLAN ops regardless of port VLAN config */
- vlan_ops->add_vlan = ice_vsi_add_vlan;
- vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
- vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
-
- if (ice_vf_is_port_vlan_ena(vf)) {
- /* setup outer VLAN ops */
- vlan_ops->set_port_vlan = ice_vsi_set_outer_port_vlan;
- /* all Rx traffic should be in the domain of the
- * assigned port VLAN, so prevent disabling Rx VLAN
- * filtering
- */
- vlan_ops->dis_rx_filtering = noop_vlan;
- vlan_ops->ena_rx_filtering =
- ice_vsi_ena_rx_vlan_filtering;
-
- /* setup inner VLAN ops */
- vlan_ops = &vsi->inner_vlan_ops;
- vlan_ops->add_vlan = noop_vlan_arg;
- vlan_ops->del_vlan = noop_vlan_arg;
- vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
- vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
- vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
- vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
- } else {
- vlan_ops->dis_rx_filtering =
- ice_vsi_dis_rx_vlan_filtering;
-
- if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
- vlan_ops->ena_rx_filtering = noop_vlan;
- else
- vlan_ops->ena_rx_filtering =
- ice_vsi_ena_rx_vlan_filtering;
-
- vlan_ops->del_vlan = ice_vsi_del_vlan;
- vlan_ops->ena_stripping = ice_vsi_ena_outer_stripping;
- vlan_ops->dis_stripping = ice_vsi_dis_outer_stripping;
- vlan_ops->ena_insertion = ice_vsi_ena_outer_insertion;
- vlan_ops->dis_insertion = ice_vsi_dis_outer_insertion;
-
- /* setup inner VLAN ops */
- vlan_ops = &vsi->inner_vlan_ops;
-
- vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
- vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
- vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
- vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
- }
- } else {
- vlan_ops = &vsi->inner_vlan_ops;
+ vlan_ops = ice_is_dvm_ena(&pf->hw) ?
+ &vsi->outer_vlan_ops : &vsi->inner_vlan_ops;
- /* inner VLAN ops regardless of port VLAN config */
- vlan_ops->add_vlan = ice_vsi_add_vlan;
- vlan_ops->dis_rx_filtering = ice_vsi_dis_rx_vlan_filtering;
- vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
- vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
-
- if (ice_vf_is_port_vlan_ena(vf)) {
- vlan_ops->set_port_vlan = ice_vsi_set_inner_port_vlan;
- vlan_ops->ena_rx_filtering =
- ice_vsi_ena_rx_vlan_filtering;
- /* all Rx traffic should be in the domain of the
- * assigned port VLAN, so prevent disabling Rx VLAN
- * filtering
- */
- vlan_ops->dis_rx_filtering = noop_vlan;
- } else {
- vlan_ops->dis_rx_filtering =
- ice_vsi_dis_rx_vlan_filtering;
- if (!test_bit(ICE_FLAG_VF_VLAN_PRUNING, pf->flags))
- vlan_ops->ena_rx_filtering = noop_vlan;
- else
- vlan_ops->ena_rx_filtering =
- ice_vsi_ena_rx_vlan_filtering;
-
- vlan_ops->del_vlan = ice_vsi_del_vlan;
- vlan_ops->ena_stripping = ice_vsi_ena_inner_stripping;
- vlan_ops->dis_stripping = ice_vsi_dis_inner_stripping;
- vlan_ops->ena_insertion = ice_vsi_ena_inner_insertion;
- vlan_ops->dis_insertion = ice_vsi_dis_inner_insertion;
- }
- }
+ vlan_ops->add_vlan = ice_vsi_add_vlan;
+ vlan_ops->ena_tx_filtering = ice_vsi_ena_tx_vlan_filtering;
+ vlan_ops->dis_tx_filtering = ice_vsi_dis_tx_vlan_filtering;
}
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h
index 875a4e615f39..df8aa09df3e3 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h
+++ b/drivers/net/ethernet/intel/ice/ice_vf_vsi_vlan_ops.h
@@ -13,7 +13,11 @@ void ice_vf_vsi_cfg_svm_legacy_vlan_mode(struct ice_vsi *vsi);
#ifdef CONFIG_PCI_IOV
void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi);
+void ice_vf_vsi_enable_port_vlan(struct ice_vsi *vsi);
+void ice_vf_vsi_disable_port_vlan(struct ice_vsi *vsi);
#else
static inline void ice_vf_vsi_init_vlan_ops(struct ice_vsi *vsi) { }
+static inline void ice_vf_vsi_enable_port_vlan(struct ice_vsi *vsi) { }
+static inline void ice_vf_vsi_disable_port_vlan(struct ice_vsi *vsi) { }
#endif /* CONFIG_PCI_IOV */
#endif /* _ICE_PF_VSI_VLAN_OPS_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index dcf628b1fccd..1c7b4ded948b 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -428,7 +428,7 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
goto err;
}
- len = sizeof(struct virtchnl_vf_resource);
+ len = virtchnl_struct_size(vfres, vsi_res, 0);
vfres = kzalloc(len, GFP_KERNEL);
if (!vfres) {
@@ -486,6 +486,9 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_CRC;
+
if (vf->driver_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
vfres->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
@@ -498,9 +501,9 @@ static int ice_vc_get_vf_res_msg(struct ice_vf *vf, u8 *msg)
vfres->num_vsis = 1;
/* Tx and Rx queue are equal for VF */
vfres->num_queue_pairs = vsi->num_txq;
- vfres->max_vectors = vf->pf->vfs.num_msix_per;
+ vfres->max_vectors = vf->num_msix;
vfres->rss_key_size = ICE_VSIQF_HKEY_ARRAY_SIZE;
- vfres->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
+ vfres->rss_lut_size = ICE_LUT_VSI_SIZE;
vfres->max_mtu = ice_vc_get_max_frame_size(vf);
vfres->vsi_res[0].vsi_id = vf->lan_vsi_num;
@@ -962,7 +965,7 @@ static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- if (vrl->lut_entries != ICE_VSIQF_HLUT_ARRAY_SIZE) {
+ if (vrl->lut_entries != ICE_LUT_VSI_SIZE) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
@@ -978,7 +981,7 @@ static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
goto error_param;
}
- if (ice_set_rss_lut(vsi, vrl->lut, ICE_VSIQF_HLUT_ARRAY_SIZE))
+ if (ice_set_rss_lut(vsi, vrl->lut, ICE_LUT_VSI_SIZE))
v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
error_param:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_RSS_LUT, v_ret,
@@ -1520,7 +1523,6 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
u16 num_q_vectors_mapped, vsi_id, vector_id;
struct virtchnl_irq_map_info *irqmap_info;
struct virtchnl_vector_map *map;
- struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
int i;
@@ -1532,7 +1534,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
* there is actually at least a single VF queue vector mapped
*/
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
- pf->vfs.num_msix_per < num_q_vectors_mapped ||
+ vf->num_msix < num_q_vectors_mapped ||
!num_q_vectors_mapped) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -1554,7 +1556,7 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
/* vector_id is always 0-based for each VF, and can never be
* larger than or equal to the max allowed interrupts per VF
*/
- if (!(vector_id < pf->vfs.num_msix_per) ||
+ if (!(vector_id < vf->num_msix) ||
!ice_vc_isvalid_vsi_id(vf, vsi_id) ||
(!vector_id && (map->rxq_map || map->txq_map))) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -1600,9 +1602,24 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
(struct virtchnl_vsi_queue_config_info *)msg;
struct virtchnl_queue_pair_info *qpi;
struct ice_pf *pf = vf->pf;
+ struct ice_lag *lag;
struct ice_vsi *vsi;
+ u8 act_prt, pri_prt;
int i = -1, q_idx;
+ lag = pf->lag;
+ mutex_lock(&pf->lag_mutex);
+ act_prt = ICE_LAG_INVALID_PORT;
+ pri_prt = pf->hw.port_info->lport;
+ if (lag && lag->bonded && lag->primary) {
+ act_prt = lag->active_port;
+ if (act_prt != pri_prt && act_prt != ICE_LAG_INVALID_PORT &&
+ lag->upper_netdev)
+ ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt);
+ else
+ act_prt = ICE_LAG_INVALID_PORT;
+ }
+
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
goto error_param;
@@ -1621,6 +1638,15 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
}
for (i = 0; i < qci->num_queue_pairs; i++) {
+ if (!qci->qpair[i].rxq.crc_disable)
+ continue;
+
+ if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC) ||
+ vf->vlan_strip_ena)
+ goto error_param;
+ }
+
+ for (i = 0; i < qci->num_queue_pairs; i++) {
qpi = &qci->qpair[i];
if (qpi->txq.vsi_id != qci->vsi_id ||
qpi->rxq.vsi_id != qci->vsi_id ||
@@ -1666,6 +1692,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
vsi->rx_rings[i]->count = qpi->rxq.ring_len;
+ if (qpi->rxq.crc_disable)
+ vsi->rx_rings[q_idx]->flags |=
+ ICE_RX_FLAGS_CRC_STRIP_DIS;
+ else
+ vsi->rx_rings[q_idx]->flags &=
+ ~ICE_RX_FLAGS_CRC_STRIP_DIS;
+
if (qpi->rxq.databuffer_size != 0 &&
(qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
qpi->rxq.databuffer_size < 1024))
@@ -1710,6 +1743,11 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
}
}
+ if (lag && lag->bonded && lag->primary &&
+ act_prt != ICE_LAG_INVALID_PORT)
+ ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+ mutex_unlock(&pf->lag_mutex);
+
/* send the response to the VF */
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
VIRTCHNL_STATUS_SUCCESS, NULL, 0);
@@ -1724,6 +1762,13 @@ error_param:
vf->vf_id, i);
}
+ if (lag && lag->bonded && lag->primary &&
+ act_prt != ICE_LAG_INVALID_PORT)
+ ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt);
+ mutex_unlock(&pf->lag_mutex);
+
+ ice_lag_move_new_vf_nodes(vf);
+
/* send the response to the VF */
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
VIRTCHNL_STATUS_ERR_PARAM, NULL, 0);
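Both the success and the error exit of ice_vc_cfg_qs_msg() now restore the LAG active port under pf->lag_mutex. A hypothetical reduction of that bracketing pattern, using only names from this patch:

static int demo_cfg_qs_bracketed(struct ice_pf *pf, struct ice_lag *lag,
				 u8 act_prt, u8 pri_prt)
{
	int err;

	mutex_lock(&pf->lag_mutex);
	if (lag && lag->bonded && lag->primary && act_prt != ICE_LAG_INVALID_PORT)
		ice_lag_move_vf_nodes_cfg(lag, act_prt, pri_prt); /* active -> primary */

	err = 0;	/* ... configure VSI queues here, success or error ... */

	if (lag && lag->bonded && lag->primary && act_prt != ICE_LAG_INVALID_PORT)
		ice_lag_move_vf_nodes_cfg(lag, pri_prt, act_prt); /* primary -> active */
	mutex_unlock(&pf->lag_mutex);

	return err;
}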
@@ -2409,6 +2454,21 @@ static int ice_vc_remove_vlan_msg(struct ice_vf *vf, u8 *msg)
}
/**

+ * ice_vsi_is_rxq_crc_strip_dis - check if Rx queue CRC stripping is disabled
+ * @vsi: pointer to the VF VSI info
+ */
+static bool ice_vsi_is_rxq_crc_strip_dis(struct ice_vsi *vsi)
+{
+ unsigned int i;
+
+ ice_for_each_alloc_rxq(vsi, i)
+ if (vsi->rx_rings[i]->flags & ICE_RX_FLAGS_CRC_STRIP_DIS)
+ return true;
+
+ return false;
+}
+
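The helper above backs the rule that keeping the CRC (crc_disable) and VLAN stripping are mutually exclusive: stripping rewrites the frame, so a preserved FCS would no longer match. A hypothetical predicate capturing the two checks this patch adds in ice_vc_cfg_qs_msg() and the stripping-enable paths:

/* Hypothetical helper; mirrors the checks added elsewhere in this patch. */
static bool demo_crc_keep_allowed(const struct ice_vf *vf)
{
	return (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_CRC) &&
	       !vf->vlan_strip_ena;
}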
+/**
* ice_vc_ena_vlan_stripping
* @vf: pointer to the VF info
*
@@ -2437,6 +2497,8 @@ static int ice_vc_ena_vlan_stripping(struct ice_vf *vf)
if (vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q))
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ else
+ vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA;
error_param:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
@@ -2472,6 +2534,8 @@ static int ice_vc_dis_vlan_stripping(struct ice_vf *vf)
if (vsi->inner_vlan_ops.dis_stripping(vsi))
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ else
+ vf->vlan_strip_ena &= ~ICE_INNER_VLAN_STRIP_ENA;
error_param:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
@@ -2615,12 +2679,14 @@ static int ice_vc_query_rxdid(struct ice_vf *vf)
goto err;
}
- /* Read flexiflag registers to determine whether the
- * corresponding RXDID is configured and supported or not.
- * Since Legacy 16byte descriptor format is not supported,
- * start from Legacy 32byte descriptor.
+ /* RXDIDs supported by the DDP package can be read from the registers
+ * to build the supported RXDID bitmap. But the legacy 32-byte RXDID
+ * is not listed in the DDP package, so add it to the bitmap manually.
+ * The legacy 16-byte descriptor is not supported.
*/
- for (i = ICE_RXDID_LEGACY_1; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
+ rxdid->supported_rxdids |= BIT(ICE_RXDID_LEGACY_1);
+
+ for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0));
if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
& GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
@@ -2647,6 +2713,8 @@ static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
{
struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ vf->vlan_strip_ena = 0;
+
if (!vsi)
return -EINVAL;
@@ -2656,10 +2724,16 @@ static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
if (ice_vf_is_port_vlan_ena(vf) && !ice_is_dvm_ena(&vsi->back->hw))
return 0;
- if (ice_vf_vlan_offload_ena(vf->driver_caps))
- return vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q);
- else
- return vsi->inner_vlan_ops.dis_stripping(vsi);
+ if (ice_vf_vlan_offload_ena(vf->driver_caps)) {
+ int err;
+
+ err = vsi->inner_vlan_ops.ena_stripping(vsi, ETH_P_8021Q);
+ if (!err)
+ vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA;
+ return err;
+ }
+
+ return vsi->inner_vlan_ops.dis_stripping(vsi);
}
static u16 ice_vc_get_max_vlan_fltrs(struct ice_vf *vf)
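vf->vlan_strip_ena is treated as a two-bit mask throughout these hunks; the defines are added to ice_vf_lib.h elsewhere in this series, presumably along these lines (the values are an assumption for illustration):

/* Assumed values; the real defines live in ice_vf_lib.h. */
#define ICE_INNER_VLAN_STRIP_ENA	BIT(0)
#define ICE_OUTER_VLAN_STRIP_ENA	BIT(1)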
@@ -3433,6 +3507,11 @@ static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
goto out;
}
+ if (ice_vsi_is_rxq_crc_strip_dis(vsi)) {
+ v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ goto out;
+ }
+
ethertype_setting = strip_msg->outer_ethertype_setting;
if (ethertype_setting) {
if (ice_vc_ena_vlan_offload(vsi,
@@ -3453,6 +3532,8 @@ static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
* enabled, is extracted in L2TAG1.
*/
ice_vsi_update_l2tsel(vsi, l2tsel);
+
+ vf->vlan_strip_ena |= ICE_OUTER_VLAN_STRIP_ENA;
}
}
@@ -3464,6 +3545,9 @@ static int ice_vc_ena_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
goto out;
}
+ if (ethertype_setting)
+ vf->vlan_strip_ena |= ICE_INNER_VLAN_STRIP_ENA;
+
out:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING_V2,
v_ret, NULL, 0);
@@ -3525,6 +3609,8 @@ static int ice_vc_dis_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
* in L2TAG1.
*/
ice_vsi_update_l2tsel(vsi, l2tsel);
+
+ vf->vlan_strip_ena &= ~ICE_OUTER_VLAN_STRIP_ENA;
}
}
@@ -3534,6 +3620,9 @@ static int ice_vc_dis_vlan_stripping_v2_msg(struct ice_vf *vf, u8 *msg)
goto out;
}
+ if (ethertype_setting)
+ vf->vlan_strip_ena &= ~ICE_INNER_VLAN_STRIP_ENA;
+
out:
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING_V2,
v_ret, NULL, 0);
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
index daa6a1e894cf..24b23b7ef04a 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_fdir.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2021, Intel Corporation. */
+/* Copyright (C) 2021-2023, Intel Corporation. */
#include "ice.h"
#include "ice_base.h"
@@ -1422,8 +1422,8 @@ ice_vc_fdir_irq_handler(struct ice_vsi *ctrl_vsi,
*/
static void ice_vf_fdir_dump_info(struct ice_vf *vf)
{
+ u32 fd_size, fd_cnt, fd_size_g, fd_cnt_g, fd_size_b, fd_cnt_b;
struct ice_vsi *vf_vsi;
- u32 fd_size, fd_cnt;
struct device *dev;
struct ice_pf *pf;
struct ice_hw *hw;
@@ -1442,12 +1442,25 @@ static void ice_vf_fdir_dump_info(struct ice_vf *vf)
fd_size = rd32(hw, VSIQF_FD_SIZE(vsi_num));
fd_cnt = rd32(hw, VSIQF_FD_CNT(vsi_num));
- dev_dbg(dev, "VF %d: space allocated: guar:0x%x, be:0x%x, space consumed: guar:0x%x, be:0x%x\n",
- vf->vf_id,
- (fd_size & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
- (fd_size & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S,
- (fd_cnt & VSIQF_FD_CNT_FD_GCNT_M) >> VSIQF_FD_CNT_FD_GCNT_S,
- (fd_cnt & VSIQF_FD_CNT_FD_BCNT_M) >> VSIQF_FD_CNT_FD_BCNT_S);
+ switch (hw->mac_type) {
+ case ICE_MAC_E830:
+ fd_size_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
+ fd_size_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
+ fd_cnt_g = FIELD_GET(E830_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
+ fd_cnt_b = FIELD_GET(E830_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
+ break;
+ case ICE_MAC_E810:
+ default:
+ fd_size_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_size);
+ fd_size_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_size);
+ fd_cnt_g = FIELD_GET(E800_VSIQF_FD_CNT_FD_GCNT_M, fd_cnt);
+ fd_cnt_b = FIELD_GET(E800_VSIQF_FD_CNT_FD_BCNT_M, fd_cnt);
+ }
+
+ dev_dbg(dev, "VF %d: Size in the FD table: guaranteed:0x%x, best effort:0x%x\n",
+ vf->vf_id, fd_size_g, fd_size_b);
+ dev_dbg(dev, "VF %d: Filter counter in the FD table: guaranteed:0x%x, best effort:0x%x\n",
+ vf->vf_id, fd_cnt_g, fd_cnt_b);
}
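FIELD_GET() from <linux/bitfield.h> derives the shift from the mask at compile time, replacing the open-coded (val & MASK) >> SHIFT pairs removed above. An equivalence sketch with a hypothetical field mask:

#include <linux/bitfield.h>

#define DEMO_FD_BCNT_M	GENMASK(29, 16)	/* hypothetical field mask */

static u32 demo_fd_bcnt(u32 regval)
{
	/* same as (regval & DEMO_FD_BCNT_M) >> 16; shift comes from the mask */
	return FIELD_GET(DEMO_FD_BCNT_M, regval);
}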
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
index 5b4a0abb4607..76266e709a39 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
@@ -202,6 +202,24 @@ int ice_vsi_dis_inner_insertion(struct ice_vsi *vsi)
return ice_vsi_manage_vlan_insertion(vsi);
}
+static void
+ice_save_vlan_info(struct ice_aqc_vsi_props *info,
+ struct ice_vsi_vlan_info *vlan)
+{
+ vlan->sw_flags2 = info->sw_flags2;
+ vlan->inner_vlan_flags = info->inner_vlan_flags;
+ vlan->outer_vlan_flags = info->outer_vlan_flags;
+}
+
+static void
+ice_restore_vlan_info(struct ice_aqc_vsi_props *info,
+ struct ice_vsi_vlan_info *vlan)
+{
+ info->sw_flags2 = vlan->sw_flags2;
+ info->inner_vlan_flags = vlan->inner_vlan_flags;
+ info->outer_vlan_flags = vlan->outer_vlan_flags;
+}
+
/**
* __ice_vsi_set_inner_port_vlan - set port VLAN VSI context settings to enable a port VLAN
* @vsi: the VSI to update
@@ -218,6 +236,7 @@ static int __ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, u16 pvid_info)
if (!ctxt)
return -ENOMEM;
+ ice_save_vlan_info(&vsi->info, &vsi->vlan_info);
ctxt->info = vsi->info;
info = &ctxt->info;
info->inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ACCEPTUNTAGGED |
@@ -259,6 +278,33 @@ int ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
return __ice_vsi_set_inner_port_vlan(vsi, port_vlan_info);
}
+int ice_vsi_clear_inner_port_vlan(struct ice_vsi *vsi)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_aqc_vsi_props *info;
+ struct ice_vsi_ctx *ctxt;
+ int ret;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ice_restore_vlan_info(&vsi->info, &vsi->vlan_info);
+ vsi->info.port_based_inner_vlan = 0;
+ ctxt->info = vsi->info;
+ info = &ctxt->info;
+ info->valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+
+ ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (ret)
+ dev_err(ice_hw_to_dev(hw), "update VSI for port VLAN failed, err %d aq_err %s\n",
+ ret, ice_aq_str(hw->adminq.sq_last_status));
+
+ kfree(ctxt);
+ return ret;
+}
+
/**
* ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
* @vsi: VSI to enable or disable VLAN pruning on
@@ -647,6 +693,7 @@ __ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, u16 vlan_info, u16 tpid)
if (!ctxt)
return -ENOMEM;
+ ice_save_vlan_info(&vsi->info, &vsi->vlan_info);
ctxt->info = vsi->info;
ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
@@ -689,9 +736,6 @@ __ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, u16 vlan_info, u16 tpid)
* used if DVM is supported. Also, this function should never be called directly
* as it should be part of ice_vsi_vlan_ops if it's needed.
*
- * This function does not support clearing the port VLAN as there is currently
- * no use case for this.
- *
* Use the ice_vlan structure passed in to set this VSI in a port VLAN.
*/
int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
@@ -705,3 +749,37 @@ int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
return __ice_vsi_set_outer_port_vlan(vsi, port_vlan_info, vlan->tpid);
}
+
+/**
+ * ice_vsi_clear_outer_port_vlan - clear outer port vlan
+ * @vsi: VSI to configure
+ *
+ * Restore the VLAN configuration previously saved in vsi->vlan_info (the
+ * save happens when the outer port VLAN is configured), then clear the
+ * port-based outer VLAN from the VSI.
+ */
+int ice_vsi_clear_outer_port_vlan(struct ice_vsi *vsi)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_vsi_ctx *ctxt;
+ int err;
+
+ ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+ if (!ctxt)
+ return -ENOMEM;
+
+ ice_restore_vlan_info(&vsi->info, &vsi->vlan_info);
+ vsi->info.port_based_outer_vlan = 0;
+ ctxt->info = vsi->info;
+
+ ctxt->info.valid_sections =
+ cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID |
+ ICE_AQ_VSI_PROP_SW_VALID);
+
+ err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for clearing outer port based VLAN failed, err %d aq_err %s\n",
+ err, ice_aq_str(hw->adminq.sq_last_status));
+
+ kfree(ctxt);
+ return err;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h
index f459909490ec..f0d84d11bd5b 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h
@@ -7,6 +7,12 @@
#include <linux/types.h>
#include "ice_vlan.h"
+struct ice_vsi_vlan_info {
+ u8 sw_flags2;
+ u8 inner_vlan_flags;
+ u8 outer_vlan_flags;
+};
+
struct ice_vsi;
int ice_vsi_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
@@ -17,6 +23,7 @@ int ice_vsi_dis_inner_stripping(struct ice_vsi *vsi);
int ice_vsi_ena_inner_insertion(struct ice_vsi *vsi, u16 tpid);
int ice_vsi_dis_inner_insertion(struct ice_vsi *vsi);
int ice_vsi_set_inner_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
+int ice_vsi_clear_inner_port_vlan(struct ice_vsi *vsi);
int ice_vsi_ena_rx_vlan_filtering(struct ice_vsi *vsi);
int ice_vsi_dis_rx_vlan_filtering(struct ice_vsi *vsi);
@@ -28,5 +35,6 @@ int ice_vsi_dis_outer_stripping(struct ice_vsi *vsi);
int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid);
int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi);
int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
+int ice_vsi_clear_outer_port_vlan(struct ice_vsi *vsi);
#endif /* _ICE_VSI_VLAN_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h
index 5b47568f6256..b2d2330dedcb 100644
--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h
+++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_ops.h
@@ -21,6 +21,7 @@ struct ice_vsi_vlan_ops {
int (*ena_tx_filtering)(struct ice_vsi *vsi);
int (*dis_tx_filtering)(struct ice_vsi *vsi);
int (*set_port_vlan)(struct ice_vsi *vsi, struct ice_vlan *vlan);
+ int (*clear_port_vlan)(struct ice_vsi *vsi);
};
void ice_vsi_init_vlan_ops(struct ice_vsi *vsi);
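With set and clear as sibling ops, a caller can toggle a port VLAN through the compat ops table. A hedged sketch; the wrapper name is hypothetical, while ice_get_compat_vsi_vlan_ops() is declared in this header:

static int demo_clear_port_vlan(struct ice_vsi *vsi)
{
	struct ice_vsi_vlan_ops *vlan_ops = ice_get_compat_vsi_vlan_ops(vsi);

	if (!vlan_ops->clear_port_vlan)	/* op may be unset on some VSI types */
		return -EOPNOTSUPP;

	return vlan_ops->clear_port_vlan(vsi);
}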
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index a7fe2b4ce655..99954508184f 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -217,21 +217,16 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
*/
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
- struct ice_aqc_add_tx_qgrp *qg_buf;
+ DEFINE_FLEX(struct ice_aqc_add_tx_qgrp, qg_buf, txqs, 1);
+ u16 size = __struct_size(qg_buf);
struct ice_q_vector *q_vector;
struct ice_tx_ring *tx_ring;
struct ice_rx_ring *rx_ring;
- u16 size;
int err;
if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
return -EINVAL;
- size = struct_size(qg_buf, txqs, 1);
- qg_buf = kzalloc(size, GFP_KERNEL);
- if (!qg_buf)
- return -ENOMEM;
-
qg_buf->num_txqs = 1;
tx_ring = vsi->tx_rings[q_idx];
@@ -240,7 +235,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
if (err)
- goto free_buf;
+ return err;
if (ice_is_xdp_ena_vsi(vsi)) {
struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
@@ -249,29 +244,28 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
qg_buf->num_txqs = 1;
err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
if (err)
- goto free_buf;
+ return err;
ice_set_ring_xdp(xdp_ring);
ice_tx_xsk_pool(vsi, q_idx);
}
err = ice_vsi_cfg_rxq(rx_ring);
if (err)
- goto free_buf;
+ return err;
ice_qvec_cfg_msix(vsi, q_vector);
err = ice_vsi_ctrl_one_rx_ring(vsi, true, q_idx, true);
if (err)
- goto free_buf;
+ return err;
clear_bit(ICE_CFG_BUSY, vsi->state);
ice_qvec_toggle_napi(vsi, q_vector, true);
ice_qvec_ena_irq(vsi, q_vector);
netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
-free_buf:
- kfree(qg_buf);
- return err;
+
+ return 0;
}
/**
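DEFINE_FLEX() (the 4-argument form from <linux/overflow.h> used in this series) replaces the kzalloc/kfree pair for ice_aqc_add_tx_qgrp with a zero-initialized on-stack object sized for a fixed number of trailing elements, and __struct_size() recovers that compile-time size, so the free_buf error label disappears. A sketch with stand-in types:

#include <linux/overflow.h>

struct demo_txq { __le16 txq_id; };
struct demo_qgrp {
	u8 num_txqs;
	struct demo_txq txqs[];		/* flexible array member */
};

static void demo_cfg_one_txq(void)
{
	/* on-stack, zeroed, with room for exactly one txqs[] entry */
	DEFINE_FLEX(struct demo_qgrp, qg, txqs, 1);
	u16 size = __struct_size(qg);	/* known at compile time */

	qg->num_txqs = 1;
	/* ... fill qg->txqs[0] and hand (qg, size) to the AQ command ... */
}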
@@ -546,19 +540,6 @@ bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
}
/**
- * ice_bump_ntc - Bump the next_to_clean counter of an Rx ring
- * @rx_ring: Rx ring
- */
-static void ice_bump_ntc(struct ice_rx_ring *rx_ring)
-{
- int ntc = rx_ring->next_to_clean + 1;
-
- ntc = (ntc < rx_ring->count) ? ntc : 0;
- rx_ring->next_to_clean = ntc;
- prefetch(ICE_RX_DESC(rx_ring, ntc));
-}
-
-/**
* ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
* @rx_ring: Rx ring
* @xdp: Pointer to XDP buffer
@@ -572,8 +553,14 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
{
unsigned int totalsize = xdp->data_end - xdp->data_meta;
unsigned int metasize = xdp->data - xdp->data_meta;
+ struct skb_shared_info *sinfo = NULL;
struct sk_buff *skb;
+ u32 nr_frags = 0;
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ nr_frags = sinfo->nr_frags;
+ }
net_prefetch(xdp->data_meta);
skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize,
@@ -589,6 +576,29 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
__skb_pull(skb, metasize);
}
+ if (likely(!xdp_buff_has_frags(xdp)))
+ goto out;
+
+ for (int i = 0; i < nr_frags; i++) {
+ struct skb_shared_info *skinfo = skb_shinfo(skb);
+ skb_frag_t *frag = &sinfo->frags[i];
+ struct page *page;
+ void *addr;
+
+ page = dev_alloc_page();
+ if (!page) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+ addr = page_to_virt(page);
+
+ memcpy(addr, skb_frag_address(frag), skb_frag_size(frag));
+
+ __skb_fill_page_desc_noacc(skinfo, skinfo->nr_frags++,
+ addr, 0, skb_frag_size(frag));
+ }
+
+out:
xsk_buff_free(xdp);
return skb;
}
@@ -597,7 +607,7 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp)
* ice_clean_xdp_irq_zc - produce AF_XDP descriptors to CQ
* @xdp_ring: XDP Tx ring
*/
-static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
+static u32 ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
{
u16 ntc = xdp_ring->next_to_clean;
struct ice_tx_desc *tx_desc;
@@ -619,7 +629,7 @@ static void ice_clean_xdp_irq_zc(struct ice_tx_ring *xdp_ring)
}
if (!completed_frames)
- return;
+ return 0;
if (likely(!xdp_ring->xdp_tx_active)) {
xsk_frames = completed_frames;
@@ -649,6 +659,8 @@ skip:
xdp_ring->next_to_clean -= cnt;
if (xsk_frames)
xsk_tx_completed(xdp_ring->xsk_pool, xsk_frames);
+
+ return completed_frames;
}
/**
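Returning the completed count (instead of void) lets the Tx hot path fold freshly reclaimed slots into its space budget before declaring the ring busy. The consumer side of that contract, reduced from the hunk below:

/* Reduced from ice_xmit_xdp_tx_zc(): reclaim only when space runs low,
 * and count what the cleanup just freed toward the space check.
 */
free_space = ICE_DESC_UNUSED(xdp_ring);
if (free_space < ICE_RING_QUARTER(xdp_ring))
	free_space += ice_clean_xdp_irq_zc(xdp_ring);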
@@ -666,37 +678,72 @@ skip:
static int ice_xmit_xdp_tx_zc(struct xdp_buff *xdp,
struct ice_tx_ring *xdp_ring)
{
+ struct skb_shared_info *sinfo = NULL;
u32 size = xdp->data_end - xdp->data;
u32 ntu = xdp_ring->next_to_use;
struct ice_tx_desc *tx_desc;
struct ice_tx_buf *tx_buf;
- dma_addr_t dma;
-
- if (ICE_DESC_UNUSED(xdp_ring) < ICE_RING_QUARTER(xdp_ring)) {
- ice_clean_xdp_irq_zc(xdp_ring);
- if (!ICE_DESC_UNUSED(xdp_ring)) {
- xdp_ring->ring_stats->tx_stats.tx_busy++;
- return ICE_XDP_CONSUMED;
- }
+ struct xdp_buff *head;
+ u32 nr_frags = 0;
+ u32 free_space;
+ u32 frag = 0;
+
+ free_space = ICE_DESC_UNUSED(xdp_ring);
+ if (free_space < ICE_RING_QUARTER(xdp_ring))
+ free_space += ice_clean_xdp_irq_zc(xdp_ring);
+
+ if (unlikely(!free_space))
+ goto busy;
+
+ if (unlikely(xdp_buff_has_frags(xdp))) {
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ nr_frags = sinfo->nr_frags;
+ if (free_space < nr_frags + 1)
+ goto busy;
}
- dma = xsk_buff_xdp_get_dma(xdp);
- xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, size);
-
- tx_buf = &xdp_ring->tx_buf[ntu];
- tx_buf->xdp = xdp;
- tx_buf->type = ICE_TX_BUF_XSK_TX;
tx_desc = ICE_TX_DESC(xdp_ring, ntu);
- tx_desc->buf_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
- 0, size, 0);
- xdp_ring->xdp_tx_active++;
+ tx_buf = &xdp_ring->tx_buf[ntu];
+ head = xdp;
+
+ for (;;) {
+ dma_addr_t dma;
+
+ dma = xsk_buff_xdp_get_dma(xdp);
+ xsk_buff_raw_dma_sync_for_device(xdp_ring->xsk_pool, dma, size);
+
+ tx_buf->xdp = xdp;
+ tx_buf->type = ICE_TX_BUF_XSK_TX;
+ tx_desc->buf_addr = cpu_to_le64(dma);
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(0, 0, size, 0);
+ /* account for each xdp_buff from xsk_buff_pool */
+ xdp_ring->xdp_tx_active++;
+
+ if (++ntu == xdp_ring->count)
+ ntu = 0;
+
+ if (frag == nr_frags)
+ break;
+
+ tx_desc = ICE_TX_DESC(xdp_ring, ntu);
+ tx_buf = &xdp_ring->tx_buf[ntu];
+
+ xdp = xsk_buff_get_frag(head);
+ size = skb_frag_size(&sinfo->frags[frag]);
+ frag++;
+ }
- if (++ntu == xdp_ring->count)
- ntu = 0;
xdp_ring->next_to_use = ntu;
+ /* mark the frame's last descriptor with EOP */
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(ICE_TX_DESC_CMD_EOP << ICE_TXD_QW1_CMD_S);
return ICE_XDP_TX;
+
+busy:
+ xdp_ring->ring_stats->tx_stats.tx_busy++;
+
+ return ICE_XDP_CONSUMED;
}
/**
@@ -752,6 +799,34 @@ out_failure:
return result;
}
+static int
+ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
+ struct xdp_buff *xdp, const unsigned int size)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(first);
+
+ if (!size)
+ return 0;
+
+ if (!xdp_buff_has_frags(first)) {
+ sinfo->nr_frags = 0;
+ sinfo->xdp_frags_size = 0;
+ xdp_buff_set_frags_flag(first);
+ }
+
+ if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
+ xsk_buff_free(first);
+ return -ENOMEM;
+ }
+
+ __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
+ virt_to_page(xdp->data_hard_start), 0, size);
+ sinfo->xdp_frags_size += size;
+ xsk_buff_add_frag(xdp);
+
+ return 0;
+}
+
/**
* ice_clean_rx_irq_zc - consumes packets from the hardware ring
* @rx_ring: AF_XDP Rx ring
@@ -762,9 +837,14 @@ out_failure:
int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ struct xsk_buff_pool *xsk_pool = rx_ring->xsk_pool;
+ u32 ntc = rx_ring->next_to_clean;
+ u32 ntu = rx_ring->next_to_use;
+ struct xdp_buff *first = NULL;
struct ice_tx_ring *xdp_ring;
unsigned int xdp_xmit = 0;
struct bpf_prog *xdp_prog;
+ u32 cnt = rx_ring->count;
bool failure = false;
int entries_to_alloc;
@@ -774,6 +854,9 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
xdp_prog = READ_ONCE(rx_ring->xdp_prog);
xdp_ring = rx_ring->xdp_ring;
+ if (ntc != rx_ring->first_desc)
+ first = *ice_xdp_buf(rx_ring, rx_ring->first_desc);
+
while (likely(total_rx_packets < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
unsigned int size, xdp_res = 0;
@@ -783,7 +866,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
u16 vlan_tag = 0;
u16 rx_ptype;
- rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);
+ rx_desc = ICE_RX_DESC(rx_ring, ntc);
stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
if (!ice_test_staterr(rx_desc->wb.status_error0, stat_err_bits))
@@ -795,51 +878,61 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
*/
dma_rmb();
- if (unlikely(rx_ring->next_to_clean == rx_ring->next_to_use))
+ if (unlikely(ntc == ntu))
break;
- xdp = *ice_xdp_buf(rx_ring, rx_ring->next_to_clean);
+ xdp = *ice_xdp_buf(rx_ring, ntc);
size = le16_to_cpu(rx_desc->wb.pkt_len) &
ICE_RX_FLX_DESC_PKT_LEN_M;
- if (!size) {
- xdp->data = NULL;
- xdp->data_end = NULL;
- xdp->data_hard_start = NULL;
- xdp->data_meta = NULL;
- goto construct_skb;
- }
xsk_buff_set_size(xdp, size);
- xsk_buff_dma_sync_for_cpu(xdp, rx_ring->xsk_pool);
+ xsk_buff_dma_sync_for_cpu(xdp, xsk_pool);
- xdp_res = ice_run_xdp_zc(rx_ring, xdp, xdp_prog, xdp_ring);
+ if (!first) {
+ first = xdp;
+ xdp_buff_clear_frags_flag(first);
+ } else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) {
+ break;
+ }
+
+ if (++ntc == cnt)
+ ntc = 0;
+
+ if (ice_is_non_eop(rx_ring, rx_desc))
+ continue;
+
+ xdp_res = ice_run_xdp_zc(rx_ring, first, xdp_prog, xdp_ring);
if (likely(xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))) {
xdp_xmit |= xdp_res;
} else if (xdp_res == ICE_XDP_EXIT) {
failure = true;
+ first = NULL;
+ rx_ring->first_desc = ntc;
break;
} else if (xdp_res == ICE_XDP_CONSUMED) {
- xsk_buff_free(xdp);
+ xsk_buff_free(first);
} else if (xdp_res == ICE_XDP_PASS) {
goto construct_skb;
}
- total_rx_bytes += size;
+ total_rx_bytes += xdp_get_buff_len(first);
total_rx_packets++;
- ice_bump_ntc(rx_ring);
+ first = NULL;
+ rx_ring->first_desc = ntc;
continue;
construct_skb:
/* XDP_PASS path */
- skb = ice_construct_skb_zc(rx_ring, xdp);
+ skb = ice_construct_skb_zc(rx_ring, first);
if (!skb) {
rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
break;
}
- ice_bump_ntc(rx_ring);
+ first = NULL;
+ rx_ring->first_desc = ntc;
if (eth_skb_pad(skb)) {
skb = NULL;
@@ -858,18 +951,22 @@ construct_skb:
ice_receive_skb(rx_ring, skb, vlan_tag);
}
- entries_to_alloc = ICE_DESC_UNUSED(rx_ring);
+ rx_ring->next_to_clean = ntc;
+ entries_to_alloc = ICE_RX_DESC_UNUSED(rx_ring);
if (entries_to_alloc > ICE_RING_QUARTER(rx_ring))
failure |= !ice_alloc_rx_bufs_zc(rx_ring, entries_to_alloc);
ice_finalize_xdp_rx(xdp_ring, xdp_xmit, 0);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
- if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
- if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
- xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
+ if (xsk_uses_need_wakeup(xsk_pool)) {
+ /* ntu could have changed when allocating entries above, so
+ * use the rx_ring value instead of the stack-based one
+ */
+ if (failure || ntc == rx_ring->next_to_use)
+ xsk_set_rx_need_wakeup(xsk_pool);
else
- xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);
+ xsk_clear_rx_need_wakeup(xsk_pool);
return (int)total_rx_packets;
}
@@ -894,7 +991,7 @@ static void ice_xmit_pkt(struct ice_tx_ring *xdp_ring, struct xdp_desc *desc,
tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use++);
tx_desc->buf_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(xsk_is_eop_desc(desc),
0, desc->len, 0);
*total_bytes += desc->len;
@@ -921,7 +1018,7 @@ static void ice_xmit_pkt_batch(struct ice_tx_ring *xdp_ring, struct xdp_desc *de
tx_desc = ICE_TX_DESC(xdp_ring, ntu++);
tx_desc->buf_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP,
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(xsk_is_eop_desc(&descs[i]),
0, descs[i].len, 0);
*total_bytes += descs[i].len;
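xsk_is_eop_desc() returns true when the descriptor does not carry XDP_PKT_CONTD, i.e. it ends a frame. Passing the bool straight into ice_build_ctob() works only because ICE_TX_DESC_CMD_EOP occupies bit 0 of the command field (value 0x1); the spelled-out equivalent, under that assumption:

/* Explicit form of the td_cmd argument used above,
 * assuming ICE_TX_DESC_CMD_EOP == 0x1.
 */
u64 td_cmd = xsk_is_eop_desc(desc) ? ICE_TX_DESC_CMD_EOP : 0;

tx_desc->cmd_type_offset_bsz = ice_build_ctob(td_cmd, 0, desc->len, 0);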